author     Ryan Dahl <ry@tinyclouds.org>  2010-08-17 08:37:25 -0700
committer  Ryan Dahl <ry@tinyclouds.org>  2010-08-17 08:37:25 -0700
commit     91757fa8400654b69de18e3840c402e369521b68 (patch)
tree       0ee925b614e1efda3821a313f9b77b7152752704 /deps/v8
parent     d4f4380f7ea04ee045fae1661242a5a7f2f1267a (diff)
download   node-91757fa8400654b69de18e3840c402e369521b68.tar.gz
Upgrade V8 to 2.3.8
Diffstat (limited to 'deps/v8')
-rw-r--r--  deps/v8/AUTHORS | 1
-rw-r--r--  deps/v8/ChangeLog | 42
-rw-r--r--  deps/v8/SConstruct | 3
-rw-r--r--  deps/v8/include/v8-profiler.h | 30
-rw-r--r--  deps/v8/include/v8.h | 10
-rwxr-xr-x  deps/v8/src/SConscript | 8
-rwxr-xr-x  deps/v8/src/SConscript.orig | 324
-rw-r--r--  deps/v8/src/accessors.cc | 2
-rw-r--r--  deps/v8/src/api.cc | 121
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h | 23
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 15
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 4
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc | 25
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 666
-rw-r--r--  deps/v8/src/arm/codegen-arm.h | 23
-rw-r--r--  deps/v8/src/arm/debug-arm.cc | 8
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc | 8
-rw-r--r--  deps/v8/src/arm/fast-codegen-arm.cc | 241
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 184
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 12
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 3
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 10
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 32
-rw-r--r--  deps/v8/src/assembler.h | 1
-rw-r--r--  deps/v8/src/bootstrapper.cc | 18
-rw-r--r--  deps/v8/src/builtins.h | 2
-rw-r--r--  deps/v8/src/checks.h | 10
-rw-r--r--  deps/v8/src/codegen.cc | 15
-rw-r--r--  deps/v8/src/codegen.h | 24
-rwxr-xr-x  deps/v8/src/compiler.cc | 28
-rw-r--r--  deps/v8/src/compiler.h | 53
-rw-r--r--  deps/v8/src/contexts.h | 2
-rw-r--r--  deps/v8/src/cpu-profiler.cc | 11
-rw-r--r--  deps/v8/src/cpu-profiler.h | 2
-rw-r--r--  deps/v8/src/debug.cc | 33
-rw-r--r--  deps/v8/src/debug.h | 10
-rw-r--r--  deps/v8/src/factory.cc | 14
-rw-r--r--  deps/v8/src/factory.h | 4
-rw-r--r--  deps/v8/src/fast-codegen.cc | 746
-rw-r--r--  deps/v8/src/fast-codegen.h | 161
-rw-r--r--  deps/v8/src/flag-definitions.h | 3
-rw-r--r--  deps/v8/src/full-codegen.cc | 5
-rw-r--r--  deps/v8/src/full-codegen.h | 9
-rw-r--r--  deps/v8/src/globals.h | 25
-rw-r--r--  deps/v8/src/handles-inl.h | 2
-rw-r--r--  deps/v8/src/handles.cc | 35
-rw-r--r--  deps/v8/src/handles.h | 3
-rw-r--r--  deps/v8/src/heap-profiler.cc | 6
-rw-r--r--  deps/v8/src/heap.cc | 522
-rw-r--r--  deps/v8/src/heap.h | 6
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h | 24
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 15
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 1
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 22
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 722
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h | 33
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc | 44
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc | 1
-rw-r--r--  deps/v8/src/ia32/fast-codegen-ia32.cc | 954
-rw-r--r--  deps/v8/src/ia32/fast-codegen-ia32.h | 155
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 175
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 66
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 33
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 32
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.h | 16
-rw-r--r--  deps/v8/src/list-inl.h | 7
-rw-r--r--  deps/v8/src/list.h | 2
-rw-r--r--  deps/v8/src/liveedit-debugger.js | 7
-rw-r--r--  deps/v8/src/liveedit.cc | 42
-rw-r--r--  deps/v8/src/mark-compact.cc | 256
-rw-r--r--  deps/v8/src/mark-compact.h | 10
-rw-r--r--  deps/v8/src/messages.js | 12
-rw-r--r--  deps/v8/src/mips/debug-mips.cc | 9
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc | 7
-rw-r--r--  deps/v8/src/objects-inl.h | 112
-rw-r--r--  deps/v8/src/objects-visiting.cc | 139
-rw-r--r--  deps/v8/src/objects-visiting.h | 382
-rw-r--r--  deps/v8/src/objects.cc | 178
-rw-r--r--  deps/v8/src/objects.h | 140
-rw-r--r--  deps/v8/src/parser.cc | 9
-rw-r--r--  deps/v8/src/platform-linux.cc | 5
-rw-r--r--  deps/v8/src/platform-nullos.cc | 6
-rw-r--r--  deps/v8/src/platform-posix.cc | 7
-rw-r--r--  deps/v8/src/platform-win32.cc | 5
-rw-r--r--  deps/v8/src/platform.h | 3
-rw-r--r--  deps/v8/src/platform.h.orig | 580
-rw-r--r--  deps/v8/src/profile-generator-inl.h | 18
-rw-r--r--  deps/v8/src/profile-generator.cc | 1234
-rw-r--r--  deps/v8/src/profile-generator.h | 491
-rw-r--r--  deps/v8/src/property.h | 6
-rw-r--r--  deps/v8/src/runtime.cc | 16
-rw-r--r--  deps/v8/src/runtime.js | 8
-rw-r--r--  deps/v8/src/serialize.cc | 8
-rw-r--r--  deps/v8/src/serialize.h | 2
-rw-r--r--  deps/v8/src/stub-cache.cc | 17
-rw-r--r--  deps/v8/src/stub-cache.h | 3
-rw-r--r--  deps/v8/src/third_party/dtoa/dtoa.c | 129
-rw-r--r--  deps/v8/src/top.cc | 12
-rw-r--r--  deps/v8/src/type-info.h | 16
-rw-r--r--  deps/v8/src/utils.h | 6
-rw-r--r--  deps/v8/src/version.cc | 4
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h | 23
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 22
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 312
-rw-r--r--  deps/v8/src/x64/codegen-x64.h | 22
-rw-r--r--  deps/v8/src/x64/debug-x64.cc | 32
-rw-r--r--  deps/v8/src/x64/fast-codegen-x64.cc | 250
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 181
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 18
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 3
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 24
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 101
-rw-r--r--  deps/v8/test/cctest/test-cpu-profiler.cc | 15
-rw-r--r--  deps/v8/test/cctest/test-disasm-ia32.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-heap-profiler.cc | 224
-rw-r--r--  deps/v8/test/mjsunit/api-call-after-bypassed-exception.js | 52
-rw-r--r--  deps/v8/test/mjsunit/bitops-info.js | 77
-rw-r--r--  deps/v8/test/mjsunit/debug-clearbreakpointgroup.js | 234
-rw-r--r--  deps/v8/test/mjsunit/debug-evaluate-bool-constructor.js | 160
-rw-r--r--  deps/v8/test/mjsunit/debug-references.js | 236
-rw-r--r--  deps/v8/test/mjsunit/debug-stepin-accessor.js | 496
-rw-r--r--  deps/v8/test/mjsunit/debug-stepin-builtin.js | 156
-rw-r--r--  deps/v8/test/mjsunit/debug-stepin-call-function-stub.js | 230
-rw-r--r--  deps/v8/test/mjsunit/debug-stepin-function-call.js | 296
-rw-r--r--  deps/v8/test/mjsunit/debug-stepnext-do-while.js | 158
-rw-r--r--  deps/v8/test/mjsunit/debug-stepout-recursive-function.js | 212
-rw-r--r--  deps/v8/test/mjsunit/debug-stepout-to-builtin.js | 168
-rw-r--r--  deps/v8/test/mjsunit/for-in-delete.js | 50
-rw-r--r--  deps/v8/test/mjsunit/fuzz-natives.js | 3
-rw-r--r--  deps/v8/test/mjsunit/global-deleted-property-keyed.js | 76
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.status | 4
-rw-r--r--  deps/v8/test/mjsunit/object-literal.js | 32
-rwxr-xr-x  deps/v8/test/mjsunit/regexp-capture.js | 114
-rw-r--r--  deps/v8/test/mjsunit/regress/bitops-register-alias.js | 31
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-246.js | 60
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-760-1.js | 49
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-760-2.js | 49
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-798.js | 109
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-815.js | 49
-rw-r--r--  deps/v8/test/sputnik/sputnik.status | 4
-rwxr-xr-x  deps/v8/tools/gc-nvp-trace-processor.py | 69
-rw-r--r--  deps/v8/tools/gyp/v8.gyp | 11
-rw-r--r--  deps/v8/tools/oom_dump/README | 30
-rw-r--r--  deps/v8/tools/oom_dump/SConstruct | 42
-rw-r--r--  deps/v8/tools/oom_dump/oom_dump.cc | 285
-rw-r--r--  deps/v8/tools/v8.xcodeproj/project.pbxproj | 24
-rw-r--r--  deps/v8/tools/visual_studio/v8_base.vcproj | 21
-rw-r--r--  deps/v8/tools/visual_studio/v8_base_arm.vcproj | 19
-rw-r--r--  deps/v8/tools/visual_studio/v8_base_x64.vcproj | 19
150 files changed, 8072 insertions, 6510 deletions
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 4d87db3be..65b8965f1 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -29,4 +29,5 @@ Rodolph Perfetta <rodolph.perfetta@arm.com>
Ryan Dahl <coldredlemur@gmail.com>
Subrato K De <subratokde@codeaurora.org>
Burcu Dogan <burcujdogan@gmail.com>
+Vlad Burlik <vladbph@gmail.com>
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index b5663744d..4c96de014 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,11 +1,45 @@
+2010-08-16: Version 2.3.8
+
+ Fixed build with strict aliasing on GCC 4.4 (issue 463).
+
+ Fixed issue with incorrect handling of custom valueOf methods on
+ string wrappers (issue 760).
+
+ Fixed compilation for ARMv4 (issue 590).
+
+ Improved performance.
+
+
+2010-08-11: Version 2.3.7
+
+ Reduced size of heap snapshots produced by heap profiler (issue 783).
+
+ Introduced v8::Value::IsRegExp method.
+
+ Fixed CPU profiler crash in start / stop sequence when non-existent
+ name is passed (issue http://crbug.com/51594).
+
+ Introduced new indexed property query callbacks API (issue 816). This
+ API is guarded by USE_NEW_QUERY_CALLBACK define and is disabled
+ by default.
+
+ Removed support for object literal get/set with number/string
+ property name.
+
+ Fixed handling of JSObject::elements in CalculateNetworkSize
+ (issue 822).
+
+ Allow compiling with strict aliasing enabled on GCC 4.4 (issue 463).
+
+
2010-08-09: Version 2.3.6
- RegExp literals create a new object every time they are evaluated
- (issue 704).
+ RegExp literals create a new object every time they are evaluated
+ (issue 704).
- Object.seal and Object.freeze return the modified object (issue 809).
+ Object.seal and Object.freeze return the modified object (issue 809).
- Fix building using GCC 4.4.4.
+ Fix building using GCC 4.4.4.
2010-08-04: Version 2.3.5
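
As a usage note for the 2.3.7 entry above ("Introduced v8::Value::IsRegExp method"): the sketch below shows how an embedder might call the new check. It is illustrative only; DescribeValue is a hypothetical function, not part of this patch.

    // Hedged sketch: distinguishing a RegExp value with the new API.
    void DescribeValue(v8::Handle<v8::Value> value) {
      if (value->IsRegExp()) {
        // A JS RegExp object, e.g. the result of evaluating /ab+c/.
      } else if (value->IsDate()) {
        // IsDate() predates this upgrade; IsRegExp() is new in 2.3.7.
      }
    }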
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index 00b8fb721..8fc192637 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -58,7 +58,7 @@ else:
# on linux we need these compiler flags to avoid crashes in the v8 test suite
# and avoid dtoa.c strict aliasing issues
if os.environ.get('GCC_VERSION') == '44':
- GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp', '-fno-strict-aliasing']
+ GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp']
GCC_DTOA_EXTRA_CCFLAGS = []
else:
GCC_EXTRA_CCFLAGS = []
@@ -80,7 +80,6 @@ ANDROID_FLAGS = ['-march=armv7-a',
'-frerun-cse-after-loop',
'-frename-registers',
'-fomit-frame-pointer',
- '-fno-strict-aliasing',
'-finline-limit=64',
'-DCAN_USE_VFP_INSTRUCTIONS=1',
'-DCAN_USE_ARMV7_INSTRUCTIONS=1',
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index c99eb0d9f..9e3cb873c 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -194,10 +194,10 @@ class HeapGraphNode;
class V8EXPORT HeapGraphEdge {
public:
enum Type {
- CONTEXT_VARIABLE = 0, // A variable from a function context.
- ELEMENT = 1, // An element of an array.
- PROPERTY = 2, // A named object property.
- INTERNAL = 3 // A link that can't be accessed from JS,
+ kContextVariable = 0, // A variable from a function context.
+ kElement = 1, // An element of an array.
+ kProperty = 2, // A named object property.
+ kInternal = 3 // A link that can't be accessed from JS,
// thus, its name isn't a real property name.
};
@@ -240,12 +240,12 @@ class V8EXPORT HeapGraphPath {
class V8EXPORT HeapGraphNode {
public:
enum Type {
- INTERNAL = 0, // Internal node, a virtual one, for housekeeping.
- ARRAY = 1, // An array of elements.
- STRING = 2, // A string.
- OBJECT = 3, // A JS object (except for arrays and strings).
- CODE = 4, // Compiled code.
- CLOSURE = 5 // Function closure.
+ kInternal = 0, // Internal node, a virtual one, for housekeeping.
+ kArray = 1, // An array of elements.
+ kString = 2, // A string.
+ kObject = 3, // A JS object (except for arrays and strings).
+ kCode = 4, // Compiled code.
+ kClosure = 5 // Function closure.
};
/** Returns node type (see HeapGraphNode::Type). */
@@ -268,13 +268,15 @@ class V8EXPORT HeapGraphNode {
int GetSelfSize() const;
/** Returns node's network (self + reachable nodes) size, in bytes. */
- int GetTotalSize() const;
+ int GetReachableSize() const;
/**
- * Returns node's private size, in bytes. That is, the size of memory
- * that will be reclaimed having this node collected.
+ * Returns node's retained size, in bytes. That is, self + sizes of
+ * the objects that are reachable only from this object. In other
+ * words, the size of memory that will be reclaimed having this node
+ * collected.
*/
- int GetPrivateSize() const;
+ int GetRetainedSize() const;
/** Returns child nodes count of the node. */
int GetChildrenCount() const;
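
The renames above affect every embedder that walks heap snapshots: the enum constants gain the k-prefix, GetTotalSize() becomes GetReachableSize(), and GetPrivateSize() becomes GetRetainedSize(). Below is a minimal traversal sketch against the renamed API. It assumes HeapProfiler::TakeSnapshot() from the same header vintage; Walk and DumpExample are hypothetical names, and the depth cap stands in for real cycle handling, since the heap graph is cyclic.

    // Hedged sketch: walk a snapshot using the renamed profiler API.
    static void Walk(const v8::HeapGraphNode* node, int depth) {
      if (depth > 3) return;  // The graph is cyclic; bound the walk.
      if (node->GetType() == v8::HeapGraphNode::kObject) {
        int self = node->GetSelfSize();
        int retained = node->GetRetainedSize();  // Was GetPrivateSize().
        (void) self; (void) retained;  // e.g. accumulate statistics here.
      }
      for (int i = 0; i < node->GetChildrenCount(); i++) {
        const v8::HeapGraphEdge* edge = node->GetChild(i);
        if (edge->GetType() != v8::HeapGraphEdge::kInternal) {
          Walk(edge->GetToNode(), depth + 1);
        }
      }
    }

    static void DumpExample() {
      const v8::HeapSnapshot* snapshot =
          v8::HeapProfiler::TakeSnapshot(v8::String::New("example"));
      Walk(snapshot->GetRoot(), 0);
    }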
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 3ac10ab91..ff7322692 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -919,6 +919,11 @@ class Value : public Data {
*/
V8EXPORT bool IsDate() const;
+ /**
+ * Returns true if this value is a RegExp.
+ */
+ V8EXPORT bool IsRegExp() const;
+
V8EXPORT Local<Boolean> ToBoolean() const;
V8EXPORT Local<Number> ToNumber() const;
V8EXPORT Local<String> ToString() const;
@@ -1819,9 +1824,9 @@ typedef Handle<Value> (*IndexedPropertySetter)(uint32_t index,
/**
* Returns a non-empty handle if the interceptor intercepts the request.
- * The result is true if the property exists and false otherwise.
+ * The result is an integer encoding property attributes.
*/
-typedef Handle<Boolean> (*IndexedPropertyQuery)(uint32_t index,
+typedef Handle<Integer> (*IndexedPropertyQuery)(uint32_t index,
const AccessorInfo& info);
/**
@@ -2140,6 +2145,7 @@ class V8EXPORT ObjectTemplate : public Template {
IndexedPropertyDeleter deleter = 0,
IndexedPropertyEnumerator enumerator = 0,
Handle<Value> data = Handle<Value>());
+
/**
* Sets the callback to be used when calling instances created from
* this template as a function. If no callback is set, instances
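
Under the new typedef above, an indexed interceptor's query callback reports property attributes as an integer instead of a boolean. A sketch of such a callback follows; per the 2.3.7 ChangeLog this path is only active when V8 is built with USE_NEW_QUERY_CALLBACK defined, and QueryIndexed with its attribute choice is illustrative, not part of the patch.

    // Hedged sketch: an IndexedPropertyQuery callback under the new
    // signature. An empty handle means "not intercepted"; a non-empty
    // handle carries v8::PropertyAttribute bits (v8::None == 0).
    static v8::Handle<v8::Integer> QueryIndexed(uint32_t index,
                                                const v8::AccessorInfo& info) {
      if (index < 16) {  // Hypothetical: first 16 indices exist, read-only.
        return v8::Integer::New(v8::ReadOnly | v8::DontDelete);
      }
      return v8::Handle<v8::Integer>();
    }

Such a callback would be installed as the query argument of ObjectTemplate::SetIndexedPropertyHandler(), whose parameter list appears in the hunk above.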
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index 9ff3414c1..29b8e1f37 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -84,6 +84,7 @@ SOURCES = {
mark-compact.cc
messages.cc
objects.cc
+ objects-visiting.cc
oprofile-agent.cc
parser.cc
profile-generator.cc
@@ -117,7 +118,6 @@ SOURCES = {
zone.cc
"""),
'arch:arm': Split("""
- fast-codegen.cc
jump-target-light.cc
virtual-frame-light.cc
arm/builtins-arm.cc
@@ -126,7 +126,6 @@ SOURCES = {
arm/cpu-arm.cc
arm/debug-arm.cc
arm/disasm-arm.cc
- arm/fast-codegen-arm.cc
arm/frames-arm.cc
arm/full-codegen-arm.cc
arm/ic-arm.cc
@@ -139,7 +138,6 @@ SOURCES = {
arm/assembler-arm.cc
"""),
'arch:mips': Split("""
- fast-codegen.cc
mips/assembler-mips.cc
mips/builtins-mips.cc
mips/codegen-mips.cc
@@ -147,7 +145,6 @@ SOURCES = {
mips/cpu-mips.cc
mips/debug-mips.cc
mips/disasm-mips.cc
- mips/fast-codegen-mips.cc
mips/full-codegen-mips.cc
mips/frames-mips.cc
mips/ic-mips.cc
@@ -166,7 +163,6 @@ SOURCES = {
ia32/cpu-ia32.cc
ia32/debug-ia32.cc
ia32/disasm-ia32.cc
- ia32/fast-codegen-ia32.cc
ia32/frames-ia32.cc
ia32/full-codegen-ia32.cc
ia32/ic-ia32.cc
@@ -178,7 +174,6 @@ SOURCES = {
ia32/virtual-frame-ia32.cc
"""),
'arch:x64': Split("""
- fast-codegen.cc
jump-target-heavy.cc
virtual-frame-heavy.cc
x64/assembler-x64.cc
@@ -187,7 +182,6 @@ SOURCES = {
x64/cpu-x64.cc
x64/debug-x64.cc
x64/disasm-x64.cc
- x64/fast-codegen-x64.cc
x64/frames-x64.cc
x64/full-codegen-x64.cc
x64/ic-x64.cc
diff --git a/deps/v8/src/SConscript.orig b/deps/v8/src/SConscript.orig
new file mode 100755
index 000000000..e6b4e3820
--- /dev/null
+++ b/deps/v8/src/SConscript.orig
@@ -0,0 +1,324 @@
+# Copyright 2008 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+from os.path import join, dirname, abspath
+root_dir = dirname(File('SConstruct').rfile().abspath)
+sys.path.append(join(root_dir, 'tools'))
+import js2c
+Import('context')
+
+
+SOURCES = {
+ 'all': Split("""
+ accessors.cc
+ allocation.cc
+ api.cc
+ assembler.cc
+ ast.cc
+ bootstrapper.cc
+ builtins.cc
+ checks.cc
+ circular-queue.cc
+ code-stubs.cc
+ codegen.cc
+ compilation-cache.cc
+ compiler.cc
+ contexts.cc
+ conversions.cc
+ counters.cc
+ cpu-profiler.cc
+ data-flow.cc
+ dateparser.cc
+ debug-agent.cc
+ debug.cc
+ disassembler.cc
+ diy-fp.cc
+ dtoa.cc
+ execution.cc
+ factory.cc
+ flags.cc
+ flow-graph.cc
+ frame-element.cc
+ frames.cc
+ full-codegen.cc
+ func-name-inferrer.cc
+ global-handles.cc
+ fast-dtoa.cc
+ fixed-dtoa.cc
+ handles.cc
+ hashmap.cc
+ heap-profiler.cc
+ heap.cc
+ ic.cc
+ interpreter-irregexp.cc
+ jsregexp.cc
+ jump-target.cc
+ liveedit.cc
+ log-utils.cc
+ log.cc
+ mark-compact.cc
+ messages.cc
+ objects.cc
+ objects-visiting.cc
+ oprofile-agent.cc
+ parser.cc
+ profile-generator.cc
+ property.cc
+ regexp-macro-assembler-irregexp.cc
+ regexp-macro-assembler.cc
+ regexp-stack.cc
+ register-allocator.cc
+ rewriter.cc
+ runtime.cc
+ scanner.cc
+ scopeinfo.cc
+ scopes.cc
+ serialize.cc
+ snapshot-common.cc
+ spaces.cc
+ string-stream.cc
+ stub-cache.cc
+ token.cc
+ top.cc
+ type-info.cc
+ unicode.cc
+ utils.cc
+ v8-counters.cc
+ v8.cc
+ v8threads.cc
+ variables.cc
+ version.cc
+ virtual-frame.cc
+ vm-state.cc
+ zone.cc
+ """),
+ 'arch:arm': Split("""
+ jump-target-light.cc
+ virtual-frame-light.cc
+ arm/builtins-arm.cc
+ arm/codegen-arm.cc
+ arm/constants-arm.cc
+ arm/cpu-arm.cc
+ arm/debug-arm.cc
+ arm/disasm-arm.cc
+ arm/frames-arm.cc
+ arm/full-codegen-arm.cc
+ arm/ic-arm.cc
+ arm/jump-target-arm.cc
+ arm/macro-assembler-arm.cc
+ arm/regexp-macro-assembler-arm.cc
+ arm/register-allocator-arm.cc
+ arm/stub-cache-arm.cc
+ arm/virtual-frame-arm.cc
+ arm/assembler-arm.cc
+ """),
+ 'arch:mips': Split("""
+ mips/assembler-mips.cc
+ mips/builtins-mips.cc
+ mips/codegen-mips.cc
+ mips/constants-mips.cc
+ mips/cpu-mips.cc
+ mips/debug-mips.cc
+ mips/disasm-mips.cc
+ mips/full-codegen-mips.cc
+ mips/frames-mips.cc
+ mips/ic-mips.cc
+ mips/jump-target-mips.cc
+ mips/macro-assembler-mips.cc
+ mips/register-allocator-mips.cc
+ mips/stub-cache-mips.cc
+ mips/virtual-frame-mips.cc
+ """),
+ 'arch:ia32': Split("""
+ jump-target-heavy.cc
+ virtual-frame-heavy.cc
+ ia32/assembler-ia32.cc
+ ia32/builtins-ia32.cc
+ ia32/codegen-ia32.cc
+ ia32/cpu-ia32.cc
+ ia32/debug-ia32.cc
+ ia32/disasm-ia32.cc
+ ia32/frames-ia32.cc
+ ia32/full-codegen-ia32.cc
+ ia32/ic-ia32.cc
+ ia32/jump-target-ia32.cc
+ ia32/macro-assembler-ia32.cc
+ ia32/regexp-macro-assembler-ia32.cc
+ ia32/register-allocator-ia32.cc
+ ia32/stub-cache-ia32.cc
+ ia32/virtual-frame-ia32.cc
+ """),
+ 'arch:x64': Split("""
+ jump-target-heavy.cc
+ virtual-frame-heavy.cc
+ x64/assembler-x64.cc
+ x64/builtins-x64.cc
+ x64/codegen-x64.cc
+ x64/cpu-x64.cc
+ x64/debug-x64.cc
+ x64/disasm-x64.cc
+ x64/frames-x64.cc
+ x64/full-codegen-x64.cc
+ x64/ic-x64.cc
+ x64/jump-target-x64.cc
+ x64/macro-assembler-x64.cc
+ x64/regexp-macro-assembler-x64.cc
+ x64/register-allocator-x64.cc
+ x64/stub-cache-x64.cc
+ x64/virtual-frame-x64.cc
+ """),
+ 'simulator:arm': ['arm/simulator-arm.cc'],
+ 'simulator:mips': ['mips/simulator-mips.cc'],
+ 'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
+ 'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'],
+ 'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
+ 'os:android': ['platform-linux.cc', 'platform-posix.cc'],
+ 'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
+ 'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
+ 'os:nullos': ['platform-nullos.cc'],
+ 'os:win32': ['platform-win32.cc'],
+ 'mode:release': [],
+ 'mode:debug': [
+ 'objects-debug.cc', 'prettyprinter.cc', 'regexp-macro-assembler-tracer.cc'
+ ]
+}
+
+
+D8_FILES = {
+ 'all': [
+ 'd8.cc', 'd8-debug.cc'
+ ],
+ 'os:linux': [
+ 'd8-posix.cc'
+ ],
+ 'os:macos': [
+ 'd8-posix.cc'
+ ],
+ 'os:android': [
+ 'd8-posix.cc'
+ ],
+ 'os:freebsd': [
+ 'd8-posix.cc'
+ ],
+ 'os:openbsd': [
+ 'd8-posix.cc'
+ ],
+ 'os:solaris': [
+ 'd8-posix.cc'
+ ],
+ 'os:win32': [
+ 'd8-windows.cc'
+ ],
+ 'os:nullos': [
+ 'd8-windows.cc' # Empty implementation at the moment.
+ ],
+ 'console:readline': [
+ 'd8-readline.cc'
+ ]
+}
+
+
+LIBRARY_FILES = '''
+runtime.js
+v8natives.js
+array.js
+string.js
+uri.js
+math.js
+messages.js
+apinatives.js
+date.js
+regexp.js
+json.js
+liveedit-debugger.js
+mirror-debugger.js
+debug-debugger.js
+'''.split()
+
+
+def Abort(message):
+ print message
+ sys.exit(1)
+
+
+def ConfigureObjectFiles():
+ env = Environment()
+ env.Replace(**context.flags['v8'])
+ context.ApplyEnvOverrides(env)
+ env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
+ env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions')
+
+ # Build the standard platform-independent source files.
+ source_files = context.GetRelevantSources(SOURCES)
+
+ d8_files = context.GetRelevantSources(D8_FILES)
+ d8_js = env.JS2C('d8-js.cc', 'd8.js', TYPE='D8')
+ d8_js_obj = context.ConfigureObject(env, d8_js, CPPPATH=['.'])
+ d8_objs = [context.ConfigureObject(env, [d8_files]), d8_js_obj]
+
+ # Combine the JavaScript library files into a single C++ file and
+ # compile it.
+ library_files = [s for s in LIBRARY_FILES]
+ library_files.append('macros.py')
+ libraries_src, libraries_empty_src = env.JS2C(['libraries.cc', 'libraries-empty.cc'], library_files, TYPE='CORE')
+ libraries_obj = context.ConfigureObject(env, libraries_src, CPPPATH=['.'])
+
+ # Build dtoa.
+ dtoa_env = env.Copy()
+ dtoa_env.Replace(**context.flags['dtoa'])
+ dtoa_files = ['dtoa-config.c']
+ dtoa_obj = context.ConfigureObject(dtoa_env, dtoa_files)
+
+ source_objs = context.ConfigureObject(env, source_files)
+ non_snapshot_files = [dtoa_obj, source_objs]
+
+ # Create snapshot if necessary. For cross compilation you should either
+ # do without snapshots and take the performance hit or you should build a
+ # host VM with the simulator=arm and snapshot=on options and then take the
+ # resulting snapshot.cc file from obj/release and put it in the src
+ # directory. Then rebuild the VM with the cross compiler and specify
+ # snapshot=nobuild on the scons command line.
+ empty_snapshot_obj = context.ConfigureObject(env, 'snapshot-empty.cc')
+ mksnapshot_env = env.Copy()
+ mksnapshot_env.Replace(**context.flags['mksnapshot'])
+ mksnapshot_src = 'mksnapshot.cc'
+ mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
+ if context.use_snapshot:
+ if context.build_snapshot:
+ snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
+ else:
+ snapshot_cc = 'snapshot.cc'
+ snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
+ else:
+ snapshot_obj = empty_snapshot_obj
+ library_objs = [non_snapshot_files, libraries_obj, snapshot_obj]
+ return (library_objs, d8_objs, [mksnapshot])
+
+
+(library_objs, d8_objs, mksnapshot) = ConfigureObjectFiles()
+Return('library_objs d8_objs mksnapshot')
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index ed0bbd7a1..3c4984660 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -488,7 +488,7 @@ Object* Accessors::FunctionGetLength(Object* object, void*) {
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
if (!found_it) return Smi::FromInt(0);
// Check if already compiled.
- if (!function->is_compiled()) {
+ if (!function->shared()->is_compiled()) {
// If the function isn't compiled yet, the length is not computed
// correctly yet. Compile it now and return the right length.
HandleScope scope;
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 4fdc95f5e..7a967dbff 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -174,6 +174,8 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
heap_stats.objects_per_type = objects_per_type;
int size_per_type[LAST_TYPE + 1] = {0};
heap_stats.size_per_type = size_per_type;
+ int os_error;
+ heap_stats.os_error = &os_error;
int end_marker;
heap_stats.end_marker = &end_marker;
i::Heap::RecordStats(&heap_stats, take_snapshot);
@@ -1792,6 +1794,13 @@ bool Value::IsDate() const {
}
+bool Value::IsRegExp() const {
+ if (IsDeadCheck("v8::Value::IsRegExp()")) return false;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return obj->IsJSRegExp();
+}
+
+
Local<String> Value::ToString() const {
if (IsDeadCheck("v8::Value::ToString()")) return Local<String>();
LOG_API("ToString");
@@ -4491,24 +4500,27 @@ const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
}
+static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) {
+ return const_cast<i::HeapGraphEdge*>(
+ reinterpret_cast<const i::HeapGraphEdge*>(edge));
+}
+
HeapGraphEdge::Type HeapGraphEdge::GetType() const {
IsDeadCheck("v8::HeapGraphEdge::GetType");
- return static_cast<HeapGraphEdge::Type>(
- reinterpret_cast<const i::HeapGraphEdge*>(this)->type());
+ return static_cast<HeapGraphEdge::Type>(ToInternal(this)->type());
}
Handle<Value> HeapGraphEdge::GetName() const {
IsDeadCheck("v8::HeapGraphEdge::GetName");
- const i::HeapGraphEdge* edge =
- reinterpret_cast<const i::HeapGraphEdge*>(this);
+ i::HeapGraphEdge* edge = ToInternal(this);
switch (edge->type()) {
- case i::HeapGraphEdge::CONTEXT_VARIABLE:
- case i::HeapGraphEdge::INTERNAL:
- case i::HeapGraphEdge::PROPERTY:
+ case i::HeapGraphEdge::kContextVariable:
+ case i::HeapGraphEdge::kInternal:
+ case i::HeapGraphEdge::kProperty:
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
edge->name())));
- case i::HeapGraphEdge::ELEMENT:
+ case i::HeapGraphEdge::kElement:
return Handle<Number>(ToApi<Number>(i::Factory::NewNumberFromInt(
edge->index())));
default: UNREACHABLE();
@@ -4519,28 +4531,32 @@ Handle<Value> HeapGraphEdge::GetName() const {
const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
IsDeadCheck("v8::HeapGraphEdge::GetFromNode");
- const i::HeapEntry* from =
- reinterpret_cast<const i::HeapGraphEdge*>(this)->from();
+ const i::HeapEntry* from = ToInternal(this)->From();
return reinterpret_cast<const HeapGraphNode*>(from);
}
const HeapGraphNode* HeapGraphEdge::GetToNode() const {
IsDeadCheck("v8::HeapGraphEdge::GetToNode");
- const i::HeapEntry* to =
- reinterpret_cast<const i::HeapGraphEdge*>(this)->to();
+ const i::HeapEntry* to = ToInternal(this)->to();
return reinterpret_cast<const HeapGraphNode*>(to);
}
+static i::HeapGraphPath* ToInternal(const HeapGraphPath* path) {
+ return const_cast<i::HeapGraphPath*>(
+ reinterpret_cast<const i::HeapGraphPath*>(path));
+}
+
+
int HeapGraphPath::GetEdgesCount() const {
- return reinterpret_cast<const i::HeapGraphPath*>(this)->path()->length();
+ return ToInternal(this)->path()->length();
}
const HeapGraphEdge* HeapGraphPath::GetEdge(int index) const {
return reinterpret_cast<const HeapGraphEdge*>(
- reinterpret_cast<const i::HeapGraphPath*>(this)->path()->at(index));
+ ToInternal(this)->path()->at(index));
}
@@ -4555,137 +4571,136 @@ const HeapGraphNode* HeapGraphPath::GetToNode() const {
}
+static i::HeapEntry* ToInternal(const HeapGraphNode* entry) {
+ return const_cast<i::HeapEntry*>(
+ reinterpret_cast<const i::HeapEntry*>(entry));
+}
+
+
HeapGraphNode::Type HeapGraphNode::GetType() const {
IsDeadCheck("v8::HeapGraphNode::GetType");
- return static_cast<HeapGraphNode::Type>(
- reinterpret_cast<const i::HeapEntry*>(this)->type());
+ return static_cast<HeapGraphNode::Type>(ToInternal(this)->type());
}
Handle<String> HeapGraphNode::GetName() const {
IsDeadCheck("v8::HeapGraphNode::GetName");
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
- reinterpret_cast<const i::HeapEntry*>(this)->name())));
+ ToInternal(this)->name())));
}
uint64_t HeapGraphNode::GetId() const {
IsDeadCheck("v8::HeapGraphNode::GetId");
- return reinterpret_cast<const i::HeapEntry*>(this)->id();
+ return ToInternal(this)->id();
}
int HeapGraphNode::GetSelfSize() const {
IsDeadCheck("v8::HeapGraphNode::GetSelfSize");
- return reinterpret_cast<const i::HeapEntry*>(this)->self_size();
+ return ToInternal(this)->self_size();
}
-int HeapGraphNode::GetTotalSize() const {
- IsDeadCheck("v8::HeapSnapshot::GetHead");
- return const_cast<i::HeapEntry*>(
- reinterpret_cast<const i::HeapEntry*>(this))->TotalSize();
+int HeapGraphNode::GetReachableSize() const {
+ IsDeadCheck("v8::HeapSnapshot::GetReachableSize");
+ return ToInternal(this)->ReachableSize();
}
-int HeapGraphNode::GetPrivateSize() const {
- IsDeadCheck("v8::HeapSnapshot::GetPrivateSize");
- return const_cast<i::HeapEntry*>(
- reinterpret_cast<const i::HeapEntry*>(this))->NonSharedTotalSize();
+int HeapGraphNode::GetRetainedSize() const {
+ IsDeadCheck("v8::HeapSnapshot::GetRetainedSize");
+ return ToInternal(this)->RetainedSize();
}
int HeapGraphNode::GetChildrenCount() const {
IsDeadCheck("v8::HeapSnapshot::GetChildrenCount");
- return reinterpret_cast<const i::HeapEntry*>(this)->children()->length();
+ return ToInternal(this)->children().length();
}
const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
IsDeadCheck("v8::HeapSnapshot::GetChild");
return reinterpret_cast<const HeapGraphEdge*>(
- reinterpret_cast<const i::HeapEntry*>(this)->children()->at(index));
+ &ToInternal(this)->children()[index]);
}
int HeapGraphNode::GetRetainersCount() const {
IsDeadCheck("v8::HeapSnapshot::GetRetainersCount");
- return reinterpret_cast<const i::HeapEntry*>(this)->retainers()->length();
+ return ToInternal(this)->retainers().length();
}
const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
IsDeadCheck("v8::HeapSnapshot::GetRetainer");
return reinterpret_cast<const HeapGraphEdge*>(
- reinterpret_cast<const i::HeapEntry*>(this)->retainers()->at(index));
+ ToInternal(this)->retainers()[index]);
}
int HeapGraphNode::GetRetainingPathsCount() const {
IsDeadCheck("v8::HeapSnapshot::GetRetainingPathsCount");
- return const_cast<i::HeapEntry*>(
- reinterpret_cast<const i::HeapEntry*>(
- this))->GetRetainingPaths()->length();
+ return ToInternal(this)->GetRetainingPaths()->length();
}
const HeapGraphPath* HeapGraphNode::GetRetainingPath(int index) const {
IsDeadCheck("v8::HeapSnapshot::GetRetainingPath");
return reinterpret_cast<const HeapGraphPath*>(
- const_cast<i::HeapEntry*>(
- reinterpret_cast<const i::HeapEntry*>(
- this))->GetRetainingPaths()->at(index));
+ ToInternal(this)->GetRetainingPaths()->at(index));
}
const HeapGraphNode* HeapSnapshotsDiff::GetAdditionsRoot() const {
IsDeadCheck("v8::HeapSnapshotsDiff::GetAdditionsRoot");
- const i::HeapSnapshotsDiff* diff =
- reinterpret_cast<const i::HeapSnapshotsDiff*>(this);
+ i::HeapSnapshotsDiff* diff =
+ const_cast<i::HeapSnapshotsDiff*>(
+ reinterpret_cast<const i::HeapSnapshotsDiff*>(this));
return reinterpret_cast<const HeapGraphNode*>(diff->additions_root());
}
const HeapGraphNode* HeapSnapshotsDiff::GetDeletionsRoot() const {
IsDeadCheck("v8::HeapSnapshotsDiff::GetDeletionsRoot");
- const i::HeapSnapshotsDiff* diff =
- reinterpret_cast<const i::HeapSnapshotsDiff*>(this);
+ i::HeapSnapshotsDiff* diff =
+ const_cast<i::HeapSnapshotsDiff*>(
+ reinterpret_cast<const i::HeapSnapshotsDiff*>(this));
return reinterpret_cast<const HeapGraphNode*>(diff->deletions_root());
}
+static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
+ return const_cast<i::HeapSnapshot*>(
+ reinterpret_cast<const i::HeapSnapshot*>(snapshot));
+}
+
+
unsigned HeapSnapshot::GetUid() const {
IsDeadCheck("v8::HeapSnapshot::GetUid");
- return reinterpret_cast<const i::HeapSnapshot*>(this)->uid();
+ return ToInternal(this)->uid();
}
Handle<String> HeapSnapshot::GetTitle() const {
IsDeadCheck("v8::HeapSnapshot::GetTitle");
- const i::HeapSnapshot* snapshot =
- reinterpret_cast<const i::HeapSnapshot*>(this);
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
- snapshot->title())));
+ ToInternal(this)->title())));
}
const HeapGraphNode* HeapSnapshot::GetRoot() const {
IsDeadCheck("v8::HeapSnapshot::GetHead");
- const i::HeapSnapshot* snapshot =
- reinterpret_cast<const i::HeapSnapshot*>(this);
- return reinterpret_cast<const HeapGraphNode*>(snapshot->const_root());
+ return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->root());
}
const HeapSnapshotsDiff* HeapSnapshot::CompareWith(
const HeapSnapshot* snapshot) const {
IsDeadCheck("v8::HeapSnapshot::CompareWith");
- i::HeapSnapshot* snapshot1 = const_cast<i::HeapSnapshot*>(
- reinterpret_cast<const i::HeapSnapshot*>(this));
- i::HeapSnapshot* snapshot2 = const_cast<i::HeapSnapshot*>(
- reinterpret_cast<const i::HeapSnapshot*>(snapshot));
return reinterpret_cast<const HeapSnapshotsDiff*>(
- snapshot1->CompareWith(snapshot2));
+ ToInternal(this)->CompareWith(ToInternal(snapshot)));
}
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 5be57709e..f72ad76ab 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -190,6 +190,29 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
}
+template<typename StaticVisitor>
+void RelocInfo::Visit() {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitPointer(target_object_address());
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (Debug::has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(this);
+#endif
+ } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
+}
+
+
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
imm32_ = immediate;
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index b1705df9b..136c82e7e 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -2276,6 +2276,21 @@ void Assembler::vcmp(const DwVfpRegister src1,
}
+void Assembler::vcmp(const DwVfpRegister src1,
+ const double src2,
+ const SBit s,
+ const Condition cond) {
+ // vcmp(Dd, Dm) double precision floating point comparison.
+ // Instruction details available in ARM DDI 0406A, A8-570.
+ // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(src2 == 0.0);
+ emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
+ src1.code()*B12 | 0x5*B9 | B8 | B6);
+}
+
+
void Assembler::vmrs(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 16e69e296..218eb97f3 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -1031,6 +1031,10 @@ class Assembler : public Malloced {
const DwVfpRegister src2,
const SBit s = LeaveCC,
const Condition cond = al);
+ void vcmp(const DwVfpRegister src1,
+ const double src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
void vmrs(const Register dst,
const Condition cond = al);
void vsqrt(const DwVfpRegister dst,
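
For orientation: the implementation added in assembler-arm.cc above only accepts 0.0 as the double operand (ASSERT(src2 == 0.0)), so this overload exists specifically to compare a VFP register against zero without materializing a second register. Below is a sketch of how emitted code might use it, assuming VFP3 is enabled via CpuFeatures::Scope; EmitBranchIfZero is a hypothetical helper inside v8::internal.

    // Hedged sketch: branch when a double register holds 0.0.
    void EmitBranchIfZero(Assembler* masm, DwVfpRegister reg, Label* if_zero) {
      masm->vcmp(reg, 0.0);  // New overload; only 0.0 is supported.
      masm->vmrs(pc);        // Transfer the FPSCR flags to the APSR.
      masm->b(if_zero, eq);  // Taken when reg compared equal to 0.0.
    }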
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index b1f29ba38..7e7e358c1 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -911,6 +911,29 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
+void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Preserve the function.
+ __ push(r1);
+
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(r1);
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ // Calculate the entry point.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Restore saved function.
+ __ pop(r1);
+
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+
+ // Do a tail-call of the compiled function.
+ __ Jump(r2);
+}
+
+
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r0: actual number of arguments
@@ -1050,7 +1073,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ ldr(r2,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(r2, Operand(r2, ASR, kSmiTagSize));
- __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeOffset));
__ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
__ cmp(r2, r0); // Check formal and actual parameter counts.
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 4bcf1a07d..df17b6f86 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -217,93 +217,80 @@ void CodeGenerator::Generate(CompilationInfo* info) {
}
#endif
- if (info->mode() == CompilationInfo::PRIMARY) {
- frame_->Enter();
- // tos: code slot
-
- // Allocate space for locals and initialize them. This also checks
- // for stack overflow.
- frame_->AllocateStackSlots();
-
- frame_->AssertIsSpilled();
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- // Allocate local context.
- // Get outer context and create a new context based on it.
- __ ldr(r0, frame_->Function());
- frame_->EmitPush(r0);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kNewContext, 1);
- }
+ frame_->Enter();
+ // tos: code slot
+
+ // Allocate space for locals and initialize them. This also checks
+ // for stack overflow.
+ frame_->AllocateStackSlots();
+
+ frame_->AssertIsSpilled();
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ // Allocate local context.
+ // Get outer context and create a new context based on it.
+ __ ldr(r0, frame_->Function());
+ frame_->EmitPush(r0);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ frame_->CallStub(&stub, 1);
+ } else {
+ frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
#ifdef DEBUG
- JumpTarget verified_true;
- __ cmp(r0, cp);
- verified_true.Branch(eq);
- __ stop("NewContext: r0 is expected to be the same as cp");
- verified_true.Bind();
+ JumpTarget verified_true;
+ __ cmp(r0, cp);
+ verified_true.Branch(eq);
+ __ stop("NewContext: r0 is expected to be the same as cp");
+ verified_true.Bind();
#endif
- // Update context local.
- __ str(cp, frame_->Context());
- }
+ // Update context local.
+ __ str(cp, frame_->Context());
+ }
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- frame_->AssertIsSpilled();
- for (int i = 0; i < scope()->num_parameters(); i++) {
- Variable* par = scope()->parameter(i);
- Slot* slot = par->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- ASSERT(!scope()->is_global_scope()); // No params in global scope.
- __ ldr(r1, frame_->ParameterAt(i));
- // Loads r2 with context; used below in RecordWrite.
- __ str(r1, SlotOperand(slot, r2));
- // Load the offset into r3.
- int slot_offset =
- FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ RecordWrite(r2, Operand(slot_offset), r3, r1);
- }
+ // TODO(1241774): Improve this code:
+ // 1) only needed if we have a context
+ // 2) no need to recompute context ptr every single time
+ // 3) don't copy parameter operand code from SlotOperand!
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, it must be the last argument
+ // passed to the parameter that needs to be copied. This is a rare
+ // case so we don't check for it, instead we rely on the copying
+ // order: such a parameter is copied repeatedly into the same
+ // context location and thus the last value is what is seen inside
+ // the function.
+ frame_->AssertIsSpilled();
+ for (int i = 0; i < scope()->num_parameters(); i++) {
+ Variable* par = scope()->parameter(i);
+ Slot* slot = par->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ ASSERT(!scope()->is_global_scope()); // No params in global scope.
+ __ ldr(r1, frame_->ParameterAt(i));
+ // Loads r2 with context; used below in RecordWrite.
+ __ str(r1, SlotOperand(slot, r2));
+ // Load the offset into r3.
+ int slot_offset =
+ FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ RecordWrite(r2, Operand(slot_offset), r3, r1);
}
}
+ }
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
+ // Store the arguments object. This must happen after context
+ // initialization because the arguments object may be stored in
+ // the context.
+ if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
+ StoreArgumentsObject(true);
+ }
- // Initialize ThisFunction reference if present.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
- StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
- }
- } else {
- // When used as the secondary compiler for splitting, r1, cp,
- // fp, and lr have been pushed on the stack. Adjust the virtual
- // frame to match this state.
- frame_->Adjust(4);
-
- // Bind all the bailout labels to the beginning of the function.
- List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
- for (int i = 0; i < bailouts->length(); i++) {
- __ bind(bailouts->at(i)->label());
- }
+ // Initialize ThisFunction reference if present.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
+ StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
}
// Initialize the function return target after the locals are set
@@ -532,6 +519,10 @@ void CodeGenerator::LoadCondition(Expression* x,
void CodeGenerator::Load(Expression* expr) {
+ // We generally assume that we are not in a spilled scope for most
+ // of the code generator. A failure to ensure this caused issue 815
+ // and this assert is designed to catch similar issues.
+ frame_->AssertIsNotSpilled();
#ifdef DEBUG
int original_height = frame_->height();
#endif
@@ -688,6 +679,10 @@ Reference::Reference(CodeGenerator* cgen,
expression_(expression),
type_(ILLEGAL),
persist_after_get_(persist_after_get) {
+ // We generally assume that we are not in a spilled scope for most
+ // of the code generator. A failure to ensure this caused issue 815
+ // and this assert is designed to catch similar issues.
+ cgen->frame()->AssertIsNotSpilled();
cgen->LoadReference(this);
}
@@ -784,12 +779,26 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target,
__ tst(tos, Operand(kSmiTagMask));
true_target->Branch(eq);
- // Slow case: call the runtime.
- frame_->EmitPush(tos);
- frame_->CallRuntime(Runtime::kToBool, 1);
- // Convert the result (r0) to a condition code.
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r0, ip);
+ // Slow case.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Implements the slow case by using ToBooleanStub.
+ // The ToBooleanStub takes a single argument, and
+ // returns a non-zero value for true, or zero for false.
+ // Both the argument value and the return value use the
+ // register assigned to tos_
+ ToBooleanStub stub(tos);
+ frame_->CallStub(&stub, 0);
+ // Convert the result in "tos" to a condition code.
+ __ cmp(tos, Operand(0));
+ } else {
+ // Implements slow case by calling the runtime.
+ frame_->EmitPush(tos);
+ frame_->CallRuntime(Runtime::kToBool, 1);
+ // Convert the result (r0) to a condition code.
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r0, ip);
+ }
}
cc_reg_ = ne;
@@ -1213,7 +1222,21 @@ void CodeGenerator::SmiOperation(Token::Value op,
case Token::SHR:
case Token::SAR: {
ASSERT(!reversed);
- TypeInfo result = TypeInfo::Integer32();
+ TypeInfo result =
+ (op == Token::SAR) ? TypeInfo::Integer32() : TypeInfo::Number();
+ if (!reversed) {
+ if (op == Token::SHR) {
+ if (int_value >= 2) {
+ result = TypeInfo::Smi();
+ } else if (int_value >= 1) {
+ result = TypeInfo::Integer32();
+ }
+ } else {
+ if (int_value >= 1) {
+ result = TypeInfo::Smi();
+ }
+ }
+ }
Register scratch = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
int shift_value = int_value & 0x1f; // least significant 5 bits
@@ -1532,9 +1555,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
__ BranchOnSmi(r0, &build_args);
__ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
__ b(ne, &build_args);
- __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ ldr(r1, FieldMemOperand(r0, SharedFunctionInfo::kCodeOffset));
+ __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeOffset));
__ cmp(r1, Operand(apply_code));
__ b(ne, &build_args);
@@ -1899,19 +1921,17 @@ void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- frame_->SpillAll();
Comment cmnt(masm_, "[ ReturnStatement");
CodeForStatementPosition(node);
Load(node->expression());
+ frame_->PopToR0();
+ frame_->PrepareForReturn();
if (function_return_is_shadowed_) {
- frame_->EmitPop(r0);
function_return_.Jump();
} else {
// Pop the result from the frame and prepare the frame for
// returning thus making it easier to merge.
- frame_->PopToR0();
- frame_->PrepareForReturn();
if (function_return_.is_bound()) {
// If the function return label is already bound we reuse the
// code by jumping to the return site.
@@ -2307,7 +2327,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ForInStatement");
CodeForStatementPosition(node);
@@ -2321,6 +2340,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Get the object to enumerate over (converted to JSObject).
Load(node->enumerable());
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// Both SpiderMonkey and kjs ignore null and undefined in contrast
// to the specification. 12.6.4 mandates a call to ToObject.
frame_->EmitPop(r0);
@@ -2482,36 +2502,39 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(r0);
frame_->EmitPush(r3); // push entry
frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
- __ mov(r3, Operand(r0));
-
+ __ mov(r3, Operand(r0), SetCC);
// If the property has been removed while iterating, we just skip it.
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r3, ip);
node->continue_target()->Branch(eq);
end_del_check.Bind();
// Store the entry in the 'each' expression and take another spin in the
// loop. r3: i'th entry of the enum cache (or string there of)
frame_->EmitPush(r3); // push entry
- { Reference each(this, node->each());
+ { VirtualFrame::RegisterAllocationScope scope(this);
+ Reference each(this, node->each());
if (!each.is_illegal()) {
if (each.size() > 0) {
+ // Loading a reference may leave the frame in an unspilled state.
+ frame_->SpillAll(); // Sync stack to memory.
+ // Get the value (under the reference on the stack) from memory.
__ ldr(r0, frame_->ElementAt(each.size()));
frame_->EmitPush(r0);
each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
- frame_->Drop(2);
+ frame_->Drop(2); // The result of the set and the extra pushed value.
} else {
// If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (eg, r3 pushed above) is
+ // that it doesn't matter whether a value (eg, ebx pushed above) is
// right on top of or right underneath a zero-sized reference.
each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
- frame_->Drop();
+ frame_->Drop(1); // Drop the result of the set operation.
}
}
}
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
+ { VirtualFrame::RegisterAllocationScope scope(this);
+ Visit(node->body());
+ }
// Next. Reestablish a spilled frame in case we are coming here via
// a continue in the body.
@@ -2558,7 +2581,9 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
// Remove the exception from the stack.
frame_->Drop();
- VisitStatements(node->catch_block()->statements());
+ { VirtualFrame::RegisterAllocationScope scope(this);
+ VisitStatements(node->catch_block()->statements());
+ }
if (frame_ != NULL) {
exit.Jump();
}
@@ -2593,7 +2618,9 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
}
// Generate code for the statements in the try block.
- VisitStatements(node->try_block()->statements());
+ { VirtualFrame::RegisterAllocationScope scope(this);
+ VisitStatements(node->try_block()->statements());
+ }
// Stop the introduced shadowing and count the number of required unlinks.
// After shadowing stops, the original labels are unshadowed and the
@@ -2614,7 +2641,7 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
// the handler list and drop the rest of this handler from the
// frame.
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(r1);
+ frame_->EmitPop(r1); // r0 can contain the return value.
__ mov(r3, Operand(handler_address));
__ str(r1, MemOperand(r3));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -2640,7 +2667,7 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
frame_->Forget(frame_->height() - handler_height);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(r1);
+ frame_->EmitPop(r1); // r0 can contain the return value.
__ str(r1, MemOperand(r3));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -2707,7 +2734,9 @@ void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
}
// Generate code for the statements in the try block.
- VisitStatements(node->try_block()->statements());
+ { VirtualFrame::RegisterAllocationScope scope(this);
+ VisitStatements(node->try_block()->statements());
+ }
// Stop the introduced shadowing and count the number of required unlinks.
// After shadowing stops, the original labels are unshadowed and the
@@ -2797,7 +2826,9 @@ void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
// and the state - while evaluating the finally block.
//
// Generate code for the statements in the finally block.
- VisitStatements(node->finally_block()->statements());
+ { VirtualFrame::RegisterAllocationScope scope(this);
+ VisitStatements(node->finally_block()->statements());
+ }
if (has_valid_frame()) {
// Restore state and return value or faked TOS.
@@ -3977,7 +4008,6 @@ void CodeGenerator::VisitCall(Call* node) {
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
// ----------------------------------
// JavaScript examples:
//
@@ -3990,8 +4020,6 @@ void CodeGenerator::VisitCall(Call* node) {
// }
// ----------------------------------
- // JumpTargets do not yet support merging frames so the frame must be
- // spilled when jumping to these targets.
JumpTarget slow, done;
// Generate fast case for loading functions from slots that
@@ -4005,8 +4033,7 @@ void CodeGenerator::VisitCall(Call* node) {
slow.Bind();
// Load the function
frame_->EmitPush(cp);
- __ mov(r0, Operand(var->name()));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(var->name()));
frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
// r0: slot value; r1: receiver
@@ -4022,7 +4049,7 @@ void CodeGenerator::VisitCall(Call* node) {
call.Jump();
done.Bind();
frame_->EmitPush(r0); // function
- LoadGlobalReceiver(r1); // receiver
+ LoadGlobalReceiver(VirtualFrame::scratch0()); // receiver
call.Bind();
}
@@ -4077,8 +4104,6 @@ void CodeGenerator::VisitCall(Call* node) {
// -------------------------------------------
// JavaScript example: 'array[index](1, 2, 3)'
// -------------------------------------------
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
Load(property->obj());
if (property->is_synthetic()) {
Load(property->key());
@@ -4086,7 +4111,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Put the function below the receiver.
// Use the global receiver.
frame_->EmitPush(r0); // Function.
- LoadGlobalReceiver(r0);
+ LoadGlobalReceiver(VirtualFrame::scratch0());
// Call the function.
CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
frame_->EmitPush(r0);
@@ -4099,6 +4124,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Set the name register and call the IC initialization code.
Load(property->key());
+ frame_->SpillAll();
frame_->EmitPop(r2); // Function name.
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
@@ -4118,10 +4144,8 @@ void CodeGenerator::VisitCall(Call* node) {
// Load the function.
Load(function);
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
// Pass the global proxy as the receiver.
- LoadGlobalReceiver(r0);
+ LoadGlobalReceiver(VirtualFrame::scratch0());
// Call the function.
CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
@@ -4176,21 +4200,21 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- ASSERT(args->length() == 1);
- JumpTarget leave, null, function, non_function_constructor;
+ Register scratch = VirtualFrame::scratch0();
+ JumpTarget null, function, leave, non_function_constructor;
- // Load the object into r0.
+ // Load the object into register.
+ ASSERT(args->length() == 1);
Load(args->at(0));
- frame_->EmitPop(r0);
+ Register tos = frame_->PopToRegister();
// If the object is a smi, we return null.
- __ tst(r0, Operand(kSmiTagMask));
+ __ tst(tos, Operand(kSmiTagMask));
null.Branch(eq);
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
- __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(tos, tos, scratch, FIRST_JS_OBJECT_TYPE);
null.Branch(lt);
// As long as JS_FUNCTION_TYPE is the last instance type and it is
@@ -4198,37 +4222,38 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
// LAST_JS_OBJECT_TYPE.
STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ cmp(r1, Operand(JS_FUNCTION_TYPE));
+ __ cmp(scratch, Operand(JS_FUNCTION_TYPE));
function.Branch(eq);
// Check if the constructor in the map is a function.
- __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+ __ ldr(tos, FieldMemOperand(tos, Map::kConstructorOffset));
+ __ CompareObjectType(tos, scratch, scratch, JS_FUNCTION_TYPE);
non_function_constructor.Branch(ne);
- // The r0 register now contains the constructor function. Grab the
+ // The tos register now contains the constructor function. Grab the
// instance class name from there.
- __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
- frame_->EmitPush(r0);
+ __ ldr(tos, FieldMemOperand(tos, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(tos,
+ FieldMemOperand(tos, SharedFunctionInfo::kInstanceClassNameOffset));
+ frame_->EmitPush(tos);
leave.Jump();
// Functions have class 'Function'.
function.Bind();
- __ mov(r0, Operand(Factory::function_class_symbol()));
- frame_->EmitPush(r0);
+ __ mov(tos, Operand(Factory::function_class_symbol()));
+ frame_->EmitPush(tos);
leave.Jump();
// Objects with a non-function constructor have class 'Object'.
non_function_constructor.Bind();
- __ mov(r0, Operand(Factory::Object_symbol()));
- frame_->EmitPush(r0);
+ __ mov(tos, Operand(Factory::Object_symbol()));
+ frame_->EmitPush(tos);
leave.Jump();
// Non-JS objects have class null.
null.Bind();
- __ LoadRoot(r0, Heap::kNullValueRootIndex);
- frame_->EmitPush(r0);
+ __ LoadRoot(tos, Heap::kNullValueRootIndex);
+ frame_->EmitPush(tos);
// All done.
leave.Bind();
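The branch structure above is the %_ClassOf fallback. Reduced to pseudo-C++ (a sketch with illustrative helpers, not V8 API):

    const char* ClassOf(Object* obj) {
      if (IsSmi(obj)) return "null";                       // smis have class null
      if (TypeOf(obj) < FIRST_JS_OBJECT_TYPE) return "null";
      if (TypeOf(obj) == JS_FUNCTION_TYPE) return "Function";
      Object* ctor = MapOf(obj)->constructor();
      if (TypeOf(ctor) != JS_FUNCTION_TYPE) return "Object";
      return SharedInfoOf(ctor)->instance_class_name();    // e.g. "Array"
    }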
@@ -4236,45 +4261,51 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- ASSERT(args->length() == 1);
+ Register scratch = VirtualFrame::scratch0();
JumpTarget leave;
+
+ ASSERT(args->length() == 1);
Load(args->at(0));
- frame_->EmitPop(r0); // r0 contains object.
+ Register tos = frame_->PopToRegister(); // tos contains object.
// if (object->IsSmi()) return the object.
- __ tst(r0, Operand(kSmiTagMask));
+ __ tst(tos, Operand(kSmiTagMask));
leave.Branch(eq);
// It is a heap object - get map. If (!object->IsJSValue()) return the object.
- __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
+ __ CompareObjectType(tos, scratch, scratch, JS_VALUE_TYPE);
leave.Branch(ne);
// Load the value.
- __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
+ __ ldr(tos, FieldMemOperand(tos, JSValue::kValueOffset));
leave.Bind();
- frame_->EmitPush(r0);
+ frame_->EmitPush(tos);
}
void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- ASSERT(args->length() == 2);
+ Register scratch1 = VirtualFrame::scratch0();
+ Register scratch2 = VirtualFrame::scratch1();
JumpTarget leave;
+
+ ASSERT(args->length() == 2);
Load(args->at(0)); // Load the object.
Load(args->at(1)); // Load the value.
- frame_->EmitPop(r0); // r0 contains value
- frame_->EmitPop(r1); // r1 contains object
+ Register value = frame_->PopToRegister();
+ Register object = frame_->PopToRegister(value);
// if (object->IsSmi()) return object.
- __ tst(r1, Operand(kSmiTagMask));
+ __ tst(object, Operand(kSmiTagMask));
leave.Branch(eq);
// It is a heap object - get map. If (!object->IsJSValue()) return the object.
- __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
+ __ CompareObjectType(object, scratch1, scratch1, JS_VALUE_TYPE);
leave.Branch(ne);
// Store the value.
- __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
+ __ str(value, FieldMemOperand(object, JSValue::kValueOffset));
// Update the write barrier.
- __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
+ __ RecordWrite(object,
+ Operand(JSValue::kValueOffset - kHeapObjectTag),
+ scratch1,
+ scratch2);
// Leave.
leave.Bind();
- frame_->EmitPush(r0);
+ frame_->EmitPush(value);
}
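Both generators implement the JSValue fast paths inline. In C++ terms the generated code amounts to (a sketch; helper names are illustrative):

    Object* ValueOf(Object* obj) {
      if (IsSmi(obj) || !IsJSValue(obj)) return obj;
      return JSValueGetValue(obj);            // unwrap, e.g. new Number(3) -> 3
    }

    Object* SetValueOf(Object* obj, Object* value) {
      if (!IsSmi(obj) && IsJSValue(obj)) {
        JSValueSetValue(obj, value);          // store plus write barrier
      }
      return value;
    }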
@@ -4558,22 +4589,18 @@ class DeferredStringCharCodeAt : public DeferredCode {
// This generates code that performs a String.prototype.charCodeAt() call
// or returns a smi in order to trigger conversion.
void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment(masm_, "[ GenerateStringCharCodeAt");
ASSERT(args->length() == 2);
Load(args->at(0));
Load(args->at(1));
- Register index = r1;
- Register object = r2;
-
- frame_->EmitPop(r1);
- frame_->EmitPop(r2);
+ Register index = frame_->PopToRegister();
+ Register object = frame_->PopToRegister(index);
// We need two extra registers.
- Register scratch = r3;
- Register result = r0;
+ Register scratch = VirtualFrame::scratch0();
+ Register result = VirtualFrame::scratch1();
DeferredStringCharCodeAt* deferred =
new DeferredStringCharCodeAt(object,
@@ -4608,16 +4635,13 @@ class DeferredStringCharFromCode : public DeferredCode {
// Generates code for creating a one-char string from a char code.
void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment(masm_, "[ GenerateStringCharFromCode");
ASSERT(args->length() == 1);
Load(args->at(0));
- Register code = r1;
- Register result = r0;
-
- frame_->EmitPop(code);
+ Register result = frame_->GetTOSRegister();
+ Register code = frame_->PopToRegister(result);
DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
code, result);
@@ -4679,23 +4703,20 @@ class DeferredStringCharAt : public DeferredCode {
// This generates code that performs a String.prototype.charAt() call
// or returns a smi in order to trigger conversion.
void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment(masm_, "[ GenerateStringCharAt");
ASSERT(args->length() == 2);
Load(args->at(0));
Load(args->at(1));
- Register index = r1;
- Register object = r2;
-
- frame_->EmitPop(r1);
- frame_->EmitPop(r2);
+ Register index = frame_->PopToRegister();
+ Register object = frame_->PopToRegister(index);
// We need three extra registers.
- Register scratch1 = r3;
- Register scratch2 = r4;
- Register result = r0;
+ Register scratch1 = VirtualFrame::scratch0();
+ Register scratch2 = VirtualFrame::scratch1();
+ // Use r6 without notifying the virtual frame.
+ Register result = r6;
DeferredStringCharAt* deferred =
new DeferredStringCharAt(object,
@@ -4793,6 +4814,152 @@ void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
}
+// Deferred code to check whether the String JavaScript wrapper object is safe
+// for using the default valueOf. This code is called after the bit caching this
+// information in the map has been checked against the map for the object in the
+// map_result_ register. On return the map_result_ register contains 1 for true
+// and 0 for false.
+class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
+ public:
+ DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
+ Register map_result,
+ Register scratch1,
+ Register scratch2)
+ : object_(object),
+ map_result_(map_result),
+ scratch1_(scratch1),
+ scratch2_(scratch2) { }
+
+ virtual void Generate() {
+ Label false_result;
+
+ // Check that map is loaded as expected.
+ if (FLAG_debug_code) {
+ __ ldr(ip, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ cmp(map_result_, ip);
+ __ Assert(eq, "Map not in expected register");
+ }
+
+ // Check for fast case object. Generate false result for slow case object.
+ __ ldr(scratch1_, FieldMemOperand(object_, JSObject::kPropertiesOffset));
+ __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(scratch1_, ip);
+ __ b(eq, &false_result);
+
+ // Look for the valueOf symbol in the descriptor array, and indicate false
+ // if found. The type is not checked, so if it is a transition it is a
+ // false negative.
+ __ ldr(map_result_,
+ FieldMemOperand(map_result_, Map::kInstanceDescriptorsOffset));
+ __ ldr(scratch2_, FieldMemOperand(map_result_, FixedArray::kLengthOffset));
+ // map_result_: descriptor array
+ // scratch2_: length of descriptor array
+ // Calculate the end of the descriptor array.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kPointerSize == 4);
+ __ add(scratch1_,
+ map_result_,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(scratch1_,
+ scratch1_,
+ Operand(scratch2_, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+ // Calculate location of the first key name.
+ __ add(map_result_,
+ map_result_,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag +
+ DescriptorArray::kFirstIndex * kPointerSize));
+ // Loop through all the keys in the descriptor array. If one of these is the
+ // symbol valueOf the result is false.
+ Label entry, loop;
+ // The use of ip to store the valueOf symbol assumes that it is not otherwise
+ // used in the loop below.
+ __ mov(ip, Operand(Factory::value_of_symbol()));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ ldr(scratch2_, MemOperand(map_result_, 0));
+ __ cmp(scratch2_, ip);
+ __ b(eq, &false_result);
+ __ add(map_result_, map_result_, Operand(kPointerSize));
+ __ bind(&entry);
+ __ cmp(map_result_, Operand(scratch1_));
+ __ b(ne, &loop);
+
+ // Reload map as register map_result_ was used as temporary above.
+ __ ldr(map_result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+
+ // If a valueOf property is not found on the object, check that its
+ // prototype is the unmodified String prototype. If not, the result is false.
+ __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kPrototypeOffset));
+ __ tst(scratch1_, Operand(kSmiTagMask));
+ __ b(eq, &false_result);
+ __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
+ __ ldr(scratch2_,
+ CodeGenerator::ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(scratch2_,
+ FieldMemOperand(scratch2_, GlobalObject::kGlobalContextOffset));
+ __ ldr(scratch2_,
+ CodeGenerator::ContextOperand(
+ scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ cmp(scratch1_, scratch2_);
+ __ b(ne, &false_result);
+
+ // Set the bit in the map to indicate that it has been checked safe for
+ // default valueOf and set true result.
+ __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
+ __ orr(scratch1_,
+ scratch1_,
+ Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ str(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
+ __ mov(map_result_, Operand(1));
+ __ jmp(exit_label());
+ __ bind(&false_result);
+ // Set false result.
+ __ mov(map_result_, Operand(0));
+ }
+
+ private:
+ Register object_;
+ Register map_result_;
+ Register scratch1_;
+ Register scratch2_;
+};
+
+
+void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Register obj = frame_->PopToRegister(); // Pop the string wrapper.
+ if (FLAG_debug_code) {
+ __ AbortIfSmi(obj);
+ }
+
+ // Check whether this map has already been checked to be safe for default
+ // valueOf.
+ Register map_result = VirtualFrame::scratch0();
+ __ ldr(map_result, FieldMemOperand(obj, HeapObject::kMapOffset));
+ __ ldrb(ip, FieldMemOperand(map_result, Map::kBitField2Offset));
+ __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ true_target()->Branch(ne);
+
+ // We need an additional two scratch registers for the deferred code.
+ Register scratch1 = VirtualFrame::scratch1();
+ // Use r6 without notifying the virtual frame.
+ Register scratch2 = r6;
+
+ DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
+ new DeferredIsStringWrapperSafeForDefaultValueOf(
+ obj, map_result, scratch1, scratch2);
+ deferred->Branch(eq);
+ deferred->BindExit();
+ __ tst(map_result, Operand(map_result));
+ cc_reg_ = ne;
+}
+
+
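What the deferred path checks, reduced to a linear scan over descriptor-array keys (a sketch that assumes keys are readable as C strings; these helpers are not V8 API):

    #include <cstring>

    bool MapIsSafeForDefaultValueOf(const char** keys, int count) {
      for (int i = 0; i < count; i++) {
        if (strcmp(keys[i], "valueOf") == 0) return false;  // shadowed valueOf
      }
      return true;  // still subject to the String.prototype map check above
    }

A hit on the cached bit in Map::kBitField2Offset skips all of this on later calls.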
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (%_ClassOf(arg) === 'Function')
@@ -4874,13 +5041,13 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
// Satisfy contract with ArgumentsAccessStub:
// Load the key into r1 and the formal parameters count into r0.
Load(args->at(0));
- frame_->EmitPop(r1);
+ frame_->PopToR1();
+ frame_->SpillAll();
__ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
// Call the shared stub to get to arguments[key].
@@ -5108,9 +5275,7 @@ class DeferredSearchCache: public DeferredCode {
void DeferredSearchCache::Generate() {
__ Push(cache_, key_);
__ CallRuntime(Runtime::kGetFromCache, 2);
- if (!dst_.is(r0)) {
- __ mov(dst_, r0);
- }
+ __ Move(dst_, r0);
}
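__ Move folds the recurring "skip the no-op register move" pattern into the macro assembler. Its effect, in outline (assuming the helper behaves as this patch uses it):

    void MacroAssembler::Move(Register dst, Register src) {
      if (!dst.is(src)) mov(dst, src);  // emit nothing when dst == src
    }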
@@ -5130,33 +5295,42 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
Load(args->at(1));
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
- frame_->EmitPop(r2);
+ frame_->PopToR1();
+ frame_->SpillAll();
+ Register key = r1; // Just popped to r1.
+ Register result = r0; // Free, as frame has just been spilled.
+ Register scratch1 = VirtualFrame::scratch0();
+ Register scratch2 = VirtualFrame::scratch1();
- __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
- __ ldr(r1, ContextOperand(r1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(cache_id)));
+ __ ldr(scratch1, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(scratch1,
+ FieldMemOperand(scratch1, GlobalObject::kGlobalContextOffset));
+ __ ldr(scratch1,
+ ContextOperand(scratch1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+ __ ldr(scratch1,
+ FieldMemOperand(scratch1, FixedArray::OffsetOfElementAt(cache_id)));
- DeferredSearchCache* deferred = new DeferredSearchCache(r0, r1, r2);
+ DeferredSearchCache* deferred =
+ new DeferredSearchCache(result, scratch1, key);
const int kFingerOffset =
FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ ldr(r0, FieldMemOperand(r1, kFingerOffset));
- // r0 now holds finger offset as a smi.
- __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // r3 now points to the start of fixed array elements.
- __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
- // Note side effect of PreIndex: r3 now points to the key of the pair.
- __ cmp(r2, r0);
+ __ ldr(result, FieldMemOperand(scratch1, kFingerOffset));
+ // result now holds finger offset as a smi.
+ __ add(scratch2, scratch1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // scratch2 now points to the start of fixed array elements.
+ __ ldr(result,
+ MemOperand(
+ scratch2, result, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
+ // Note side effect of PreIndex: scratch2 now points to the key of the pair.
+ __ cmp(key, result);
deferred->Branch(ne);
- __ ldr(r0, MemOperand(r3, kPointerSize));
+ __ ldr(result, MemOperand(scratch2, kPointerSize));
deferred->BindExit();
- frame_->EmitPush(r0);
+ frame_->EmitPush(result);
}
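The fast path above probes the JSFunction result cache only at its saved finger index; everything else goes through DeferredSearchCache. In outline (a sketch; helpers are illustrative):

    Object* CacheLookup(FixedArray* cache, Object* key) {
      int finger = SmiValue(cache->get(kFingerIndex));  // element index of last hit
      if (cache->get(finger) == key) {
        return cache->get(finger + 1);                  // value is stored after key
      }
      return RuntimeGetFromCache(cache, key);           // slow path, updates finger
    }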
@@ -6851,6 +7025,11 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
__ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+ __ str(r3, FieldMemOperand(r0, JSFunction::kCodeOffset));
+
// Return result. The argument function info has been popped already.
__ Ret();
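The two added stores copy the code pointer out of the SharedFunctionInfo into the newly allocated closure. As an object-model sketch (struct layout illustrative only):

    struct SharedFunctionInfo { Code* code; /* ... */ };
    struct JSFunction { SharedFunctionInfo* shared; Code* code; /* ... */ };

    void InitializeClosureCode(JSFunction* f) {
      f->code = f->shared->code;  // calls now load code from the function itself
    }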
@@ -7801,6 +7980,77 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
+// This stub does not handle the inlined cases (Smis, Booleans, undefined).
+// The stub returns zero for false, and a non-zero value for true.
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ Label false_result;
+ Label not_heap_number;
+ Register scratch0 = VirtualFrame::scratch0();
+
+ // HeapNumber => false iff +0, -0, or NaN.
+ __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch0, ip);
+ __ b(&not_heap_number, ne);
+
+ __ sub(ip, tos_, Operand(kHeapObjectTag));
+ __ vldr(d1, ip, HeapNumber::kValueOffset);
+ __ vcmp(d1, 0.0);
+ __ vmrs(pc);
+ // "tos_" is a register, and contains a non zero value by default.
+ // Hence we only need to overwrite "tos_" with zero to return false for
+ // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+ __ mov(tos_, Operand(0), LeaveCC, eq); // for FP_ZERO
+ __ mov(tos_, Operand(0), LeaveCC, vs); // for FP_NAN
+ __ Ret();
+
+ __ bind(&not_heap_number);
+
+ // Check if the value is 'null'.
+ // 'null' => false.
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(tos_, ip);
+ __ b(&false_result, eq);
+
+ // It can be an undetectable object.
+ // Undetectable => false.
+ __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ ldrb(scratch0, FieldMemOperand(ip, Map::kBitFieldOffset));
+ __ and_(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
+ __ cmp(scratch0, Operand(1 << Map::kIsUndetectable));
+ __ b(&false_result, eq);
+
+ // JavaScript object => true.
+ __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ ldrb(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+ __ cmp(scratch0, Operand(FIRST_JS_OBJECT_TYPE));
+ // "tos_" is a register and contains a non-zero value.
+ // Hence we implicitly return true if the greater than
+ // condition is satisfied.
+ __ Ret(gt);
+
+ // Check for string
+ __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ ldrb(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+ __ cmp(scratch0, Operand(FIRST_NONSTRING_TYPE));
+ // "tos_" is a register and contains a non-zero value.
+ // Hence we implicitly return true if the greater than
+ // condition is satisfied.
+ __ Ret(gt);
+
+ // String value => false iff empty, i.e., length is zero
+ __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+ // If length is zero, "tos_" contains zero ==> false.
+ // If length is not zero, "tos_" contains a non-zero value ==> true.
+ __ Ret();
+
+ // Return 0 in "tos_" for false.
+ __ bind(&false_result);
+ __ mov(tos_, Operand(0));
+ __ Ret();
+}
+
+
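The stub walks the ECMA-262 ToBoolean cases that the inline code does not handle. As a semantics sketch (the struct is illustrative, not a V8 type):

    struct Value {  // illustrative tagged value
      bool is_heap_number, is_null, is_undetectable, is_js_object, is_string;
      double number;
      int string_length;
    };

    bool ToBooleanSlow(const Value& v) {
      if (v.is_heap_number)
        return v.number != 0.0 && v.number == v.number;  // false for +-0 and NaN
      if (v.is_null || v.is_undetectable) return false;
      if (v.is_js_object) return true;
      if (v.is_string) return v.string_length != 0;      // empty string is falsy
      return true;
    }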
// We fall into this code if the operands were Smis, but the result was
// not (eg. overflow). We branch into this code (to the not_smi label) if
// the operands were not both Smi. The operands are in r0 and r1. In order
@@ -10444,11 +10694,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
// NumberToSmi discards numbers that are not exact integers.
__ CallRuntime(Runtime::kNumberToSmi, 1);
}
- if (!scratch_.is(r0)) {
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ mov(scratch_, r0);
- }
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ Move(scratch_, r0);
__ pop(index_);
__ pop(object_);
// Reload the instance type.
@@ -10467,9 +10715,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ Push(object_, index_);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
- if (!result_.is(r0)) {
- __ mov(result_, r0);
- }
+ __ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
@@ -10510,9 +10756,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ push(code_);
__ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result_.is(r0)) {
- __ mov(result_, r0);
- }
+ __ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index bfe20809b..029d59900 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -286,6 +286,10 @@ class CodeGenerator: public AstVisitor {
return inlined_write_barrier_size_ + 4;
}
+ static MemOperand ContextOperand(Register context, int index) {
+ return MemOperand(context, Context::SlotOffset(index));
+ }
+
private:
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
@@ -338,10 +342,6 @@ class CodeGenerator: public AstVisitor {
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
- static MemOperand ContextOperand(Register context, int index) {
- return MemOperand(context, Context::SlotOffset(index));
- }
-
MemOperand SlotOperand(Slot* slot, Register tmp);
MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
@@ -482,6 +482,8 @@ class CodeGenerator: public AstVisitor {
void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
+ void GenerateIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -623,6 +625,19 @@ class TranscendentalCacheStub: public CodeStub {
};
+class ToBooleanStub: public CodeStub {
+ public:
+ explicit ToBooleanStub(Register tos) : tos_(tos) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Register tos_;
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return tos_.code(); }
+};
+
+
class GenericBinaryOpStub : public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index e87d265e8..3a948451b 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -293,15 +293,11 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on arm");
}
+const bool Debug::kFrameDropperSupported = false;
+
#undef __
-Object** Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
- Handle<Code> code) {
- UNREACHABLE();
- return NULL;
-}
-const int Debug::kFrameDropperFrameSize = -1;
#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index fd142bd96..0029ed168 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1188,7 +1188,13 @@ void Decoder::DecodeVCMP(Instr* instr) {
bool raise_exception_for_qnan = (instr->Bit(7) == 0x1);
if (dp_operation && !raise_exception_for_qnan) {
- Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
+ if (instr->Opc2Field() == 0x4) {
+ Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
+ } else if (instr->Opc2Field() == 0x5) {
+ Format(instr, "vcmp.f64'cond 'Dd, #0.0");
+ } else {
+ Unknown(instr); // invalid
+ }
} else {
Unknown(instr); // Not used by V8.
}
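The Opc2 field selects between the two VFP compare encodings (per the ARM architecture manual), which is also why the simulator change below only reads a second register operand when Opc2 is 0x4:

    Opc2 == 0x4 : vcmp.f64 Dd, Dm     // compare two double registers
    Opc2 == 0x5 : vcmp.f64 Dd, #0.0   // compare against immediate zero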
diff --git a/deps/v8/src/arm/fast-codegen-arm.cc b/deps/v8/src/arm/fast-codegen-arm.cc
deleted file mode 100644
index 36ac2aa3d..000000000
--- a/deps/v8/src/arm/fast-codegen-arm.cc
+++ /dev/null
@@ -1,241 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "fast-codegen.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-Register FastCodeGenerator::accumulator0() { return r0; }
-Register FastCodeGenerator::accumulator1() { return r1; }
-Register FastCodeGenerator::scratch0() { return r3; }
-Register FastCodeGenerator::scratch1() { return r4; }
-Register FastCodeGenerator::scratch2() { return r5; }
-Register FastCodeGenerator::receiver_reg() { return r2; }
-Register FastCodeGenerator::context_reg() { return cp; }
-
-
-void FastCodeGenerator::EmitLoadReceiver() {
- // Offset 2 is due to return address and saved frame pointer.
- int index = 2 + scope()->num_parameters();
- __ ldr(receiver_reg(), MemOperand(sp, index * kPointerSize));
-}
-
-
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
- ASSERT(!destination().is(no_reg));
- ASSERT(cell->IsJSGlobalPropertyCell());
-
- __ mov(destination(), Operand(cell));
- __ ldr(destination(),
- FieldMemOperand(destination(), JSGlobalPropertyCell::kValueOffset));
- if (FLAG_debug_code) {
- __ mov(ip, Operand(Factory::the_hole_value()));
- __ cmp(destination(), ip);
- __ Check(ne, "DontDelete cells can't contain the hole");
- }
-
- // The loaded value is not known to be a smi.
- clear_as_smi(destination());
-}
-
-
-void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
- LookupResult lookup;
- info()->receiver()->Lookup(*name, &lookup);
-
- ASSERT(lookup.holder() == *info()->receiver());
- ASSERT(lookup.type() == FIELD);
- Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
- int index = lookup.GetFieldIndex() - map->inobject_properties();
- int offset = index * kPointerSize;
-
- // We will emit the write barrier unless the stored value is statically
- // known to be a smi.
- bool needs_write_barrier = !is_smi(accumulator0());
-
- // Negative offsets are inobject properties.
- if (offset < 0) {
- offset += map->instance_size();
- __ str(accumulator0(), FieldMemOperand(receiver_reg(), offset));
- if (needs_write_barrier) {
- // Preserve receiver from write barrier.
- __ mov(scratch0(), receiver_reg());
- }
- } else {
- offset += FixedArray::kHeaderSize;
- __ ldr(scratch0(),
- FieldMemOperand(receiver_reg(), JSObject::kPropertiesOffset));
- __ str(accumulator0(), FieldMemOperand(scratch0(), offset));
- }
-
- if (needs_write_barrier) {
- __ RecordWrite(scratch0(), Operand(offset), scratch1(), scratch2());
- }
-
- if (destination().is(accumulator1())) {
- __ mov(accumulator1(), accumulator0());
- if (is_smi(accumulator0())) {
- set_as_smi(accumulator1());
- } else {
- clear_as_smi(accumulator1());
- }
- }
-}
-
-
-void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
- ASSERT(!destination().is(no_reg));
- LookupResult lookup;
- info()->receiver()->Lookup(*name, &lookup);
-
- ASSERT(lookup.holder() == *info()->receiver());
- ASSERT(lookup.type() == FIELD);
- Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
- int index = lookup.GetFieldIndex() - map->inobject_properties();
- int offset = index * kPointerSize;
-
- // Perform the load. Negative offsets are inobject properties.
- if (offset < 0) {
- offset += map->instance_size();
- __ ldr(destination(), FieldMemOperand(receiver_reg(), offset));
- } else {
- offset += FixedArray::kHeaderSize;
- __ ldr(scratch0(),
- FieldMemOperand(receiver_reg(), JSObject::kPropertiesOffset));
- __ ldr(destination(), FieldMemOperand(scratch0(), offset));
- }
-
- // The loaded value is not known to be a smi.
- clear_as_smi(destination());
-}
-
-
-void FastCodeGenerator::EmitBitOr() {
- if (is_smi(accumulator0()) && is_smi(accumulator1())) {
- // If both operands are known to be a smi then there is no need to check
- // the operands or result. There is no need to perform the operation in
- // an effect context.
- if (!destination().is(no_reg)) {
- __ orr(destination(), accumulator1(), Operand(accumulator0()));
- }
- } else {
- // Left is in accumulator1, right in accumulator0.
- if (destination().is(accumulator0())) {
- __ mov(scratch0(), accumulator0());
- __ orr(destination(), accumulator1(), Operand(accumulator1()));
- Label* bailout =
- info()->AddBailout(accumulator1(), scratch0()); // Left, right.
- __ BranchOnNotSmi(destination(), bailout);
- } else if (destination().is(accumulator1())) {
- __ mov(scratch0(), accumulator1());
- __ orr(destination(), accumulator1(), Operand(accumulator0()));
- Label* bailout = info()->AddBailout(scratch0(), accumulator0());
- __ BranchOnNotSmi(destination(), bailout);
- } else {
- ASSERT(destination().is(no_reg));
- __ orr(scratch0(), accumulator1(), Operand(accumulator0()));
- Label* bailout = info()->AddBailout(accumulator1(), accumulator0());
- __ BranchOnNotSmi(scratch0(), bailout);
- }
- }
-
- // If we didn't bailout, the result (in fact, both inputs too) is known to
- // be a smi.
- set_as_smi(accumulator0());
- set_as_smi(accumulator1());
-}
-
-
-void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
- ASSERT(info_ == NULL);
- info_ = compilation_info;
- Comment cmnt(masm_, "[ function compiled by fast code generator");
-
- // Save the caller's frame pointer and set up our own.
- Comment prologue_cmnt(masm(), ";; Prologue");
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(2 * kPointerSize));
- // Note that we keep a live register reference to cp (context) at
- // this point.
-
- Label* bailout_to_beginning = info()->AddBailout();
- // Receiver (this) is allocated to a fixed register.
- if (info()->has_this_properties()) {
- Comment cmnt(masm(), ";; MapCheck(this)");
- if (FLAG_print_ir) {
- PrintF("MapCheck(this)\n");
- }
- ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
- Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
- Handle<Map> map(object->map());
- EmitLoadReceiver();
- __ CheckMap(receiver_reg(), scratch0(), map, bailout_to_beginning, false);
- }
-
- // If there is a global variable access check if the global object is the
- // same as at lazy-compilation time.
- if (info()->has_globals()) {
- Comment cmnt(masm(), ";; MapCheck(GLOBAL)");
- if (FLAG_print_ir) {
- PrintF("MapCheck(GLOBAL)\n");
- }
- ASSERT(info()->has_global_object());
- Handle<Map> map(info()->global_object()->map());
- __ ldr(scratch0(), CodeGenerator::GlobalObject());
- __ CheckMap(scratch0(), scratch1(), map, bailout_to_beginning, true);
- }
-
- VisitStatements(function()->body());
-
- Comment return_cmnt(masm(), ";; Return(<undefined>)");
- if (FLAG_print_ir) {
- PrintF("Return(<undefined>)\n");
- }
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
- __ add(sp, sp, Operand(sp_delta));
- __ Jump(lr);
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index ea5a8f2a8..b58a4a585 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -55,99 +55,97 @@ namespace internal {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-arm.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
- if (mode == PRIMARY) {
- int locals_count = scope()->num_stack_slots();
+ int locals_count = scope()->num_stack_slots();
- __ Push(lr, fp, cp, r1);
- if (locals_count > 0) {
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- }
- // Adjust fp to point to caller's fp.
- __ add(fp, sp, Operand(2 * kPointerSize));
+ __ Push(lr, fp, cp, r1);
+ if (locals_count > 0) {
+ // Load undefined value here, so the value is ready for the loop
+ // below.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ }
+ // Adjust fp to point to caller's fp.
+ __ add(fp, sp, Operand(2 * kPointerSize));
- { Comment cmnt(masm_, "[ Allocate locals");
- for (int i = 0; i < locals_count; i++) {
- __ push(ip);
- }
+ { Comment cmnt(masm_, "[ Allocate locals");
+ for (int i = 0; i < locals_count; i++) {
+ __ push(ip);
}
+ }
- bool function_in_register = true;
+ bool function_in_register = true;
- // Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is in r1.
- __ push(r1);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewContext, 1);
- }
- function_in_register = false;
- // Context is returned in both r0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = scope()->parameter(i)->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ ldr(r0, MemOperand(fp, parameter_offset));
- // Store it in the context.
- __ mov(r1, Operand(Context::SlotOffset(slot->index())));
- __ str(r0, MemOperand(cp, r1));
- // Update the write barrier. This clobbers all involved
- // registers, so we have to use two more registers to avoid
- // clobbering cp.
- __ mov(r2, Operand(cp));
- __ RecordWrite(r2, Operand(r1), r3, r0);
- }
+ // Possibly allocate a local context.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is in r1.
+ __ push(r1);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewContext, 1);
+ }
+ function_in_register = false;
+ // Context is returned in both r0 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ ldr(r0, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ __ mov(r1, Operand(Context::SlotOffset(slot->index())));
+ __ str(r0, MemOperand(cp, r1));
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use two more registers to avoid
+ // clobbering cp.
+ __ mov(r2, Operand(cp));
+ __ RecordWrite(r2, Operand(r1), r3, r0);
}
}
+ }
- Variable* arguments = scope()->arguments()->AsVariable();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register) {
- // Load this again, if it's used by the local context below.
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mov(r3, r1);
- }
- // Receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
- __ add(r2, fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
- __ Push(r3, r2, r1);
-
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiever and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ CallStub(&stub);
- // Duplicate the value; move-to-slot operation might clobber registers.
- __ mov(r3, r0);
- Move(arguments->slot(), r0, r1, r2);
- Slot* dot_arguments_slot =
- scope()->arguments_shadow()->AsVariable()->slot();
- Move(dot_arguments_slot, r3, r1, r2);
+ Variable* arguments = scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register) {
+ // Load this again, if it's used by the local context below.
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ mov(r3, r1);
}
+ // Receiver is just before the parameters on the caller's stack.
+ int offset = scope()->num_parameters() * kPointerSize;
+ __ add(r2, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ Push(r3, r2, r1);
+
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ // Duplicate the value; move-to-slot operation might clobber registers.
+ __ mov(r3, r0);
+ Move(arguments->slot(), r0, r1, r2);
+ Slot* dot_arguments_slot =
+ scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, r3, r1, r2);
}
{ Comment cmnt(masm_, "[ Declarations");
@@ -956,15 +954,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(r4, Operand(r2));
__ b(eq, &update_each);
- // Convert the entry to a string or null if it isn't a property
- // anymore. If the property has been removed while iterating, we
+ // Convert the entry to a string or (smi) 0 if it isn't a property
+ // any more. If the property has been removed while iterating, we
// just skip it.
__ push(r1); // Enumerable.
__ push(r3); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS);
- __ mov(r3, Operand(r0));
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r3, ip);
+ __ mov(r3, Operand(r0), SetCC);
__ b(eq, loop_statement.continue_target());
// Update the 'each' property or variable from the possibly filtered
@@ -1959,6 +1955,26 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args) {
+
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
+ // used in a few functions in runtime.js which should not normally be hit by
+ // this compiler.
+ __ jmp(if_false);
+ Apply(context_, if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 9c25ccde2..38c7c28c9 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -757,7 +757,7 @@ void MacroAssembler::InvokeFunction(Register fun,
SharedFunctionInfo::kFormalParameterCountOffset));
mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
ldr(code_reg,
- MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
+ MemOperand(r1, JSFunction::kCodeOffset - kHeapObjectTag));
add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
ParameterCount expected(expected_reg);
@@ -1508,8 +1508,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
// Make sure the code objects in the builtins object and in the
// builtin function are the same.
push(r1);
- ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCodeOffset));
+ ldr(r1, FieldMemOperand(r1, JSFunction::kCodeOffset));
cmp(r1, target);
Assert(eq, "Builtin code object changed");
pop(r1);
@@ -1656,6 +1655,13 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
}
+void MacroAssembler::AbortIfSmi(Register object) {
+ ASSERT_EQ(0, kSmiTag);
+ tst(object, Operand(kSmiTagMask));
+ Assert(ne, "Operand is a smi");
+}
+
+
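AbortIfSmi leans on the smi tagging scheme: a smi is the integer shifted left by one with the tag bit clear, so the tst sets Z exactly for smis. In C terms (assuming 32-bit tagged words, kSmiTag == 0, kSmiTagSize == 1):

    const intptr_t kSmiTag = 0, kSmiTagMask = 1;

    bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMask) == kSmiTag; }
    intptr_t SmiValue(intptr_t tagged) { return tagged >> 1; }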
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first,
Register second,
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 37a1b1cb1..836ed7499 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -618,6 +618,9 @@ class MacroAssembler: public Assembler {
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+ // Abort execution if argument is a smi. Used in debug code.
+ void AbortIfSmi(Register object);
+
// ---------------------------------------------------------------------------
// String utilities
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 04635e3f9..c4cc8d46c 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -2431,11 +2431,17 @@ void Simulator::DecodeVCMP(Instr* instr) {
}
int d = GlueRegCode(!dp_operation, instr->VdField(), instr->DField());
- int m = GlueRegCode(!dp_operation, instr->VmField(), instr->MField());
+ int m = 0;
+ if (instr->Opc2Field() == 0x4) {
+ m = GlueRegCode(!dp_operation, instr->VmField(), instr->MField());
+ }
if (dp_operation) {
double dd_value = get_double_from_d_register(d);
- double dm_value = get_double_from_d_register(m);
+ double dm_value = 0.0;
+ if (instr->Opc2Field() == 0x4) {
+ dm_value = get_double_from_d_register(m);
+ }
Compute_FPSCR_Flags(dd_value, dm_value);
} else {
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 8c8e702d6..fa90ca7d1 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -1212,38 +1212,6 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
}
-Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
- // ----------- S t a t e -------------
- // -- r1: function
- // -- lr: return address
- // -----------------------------------
-
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Preserve the function.
- __ push(r1);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyCompile, 1);
-
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore saved function.
- __ pop(r1);
-
- // Tear down temporary frame.
- __ LeaveInternalFrame();
-
- // Do a tail-call of the compiled function.
- __ Jump(r2);
-
- return GetCodeWithFlags(flags, "LazyCompileStub");
-}
-
-
void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
__ cmp(r2, Operand(Handle<String>(name)));
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index cf7020ece..157743359 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -235,6 +235,7 @@ class RelocInfo BASE_EMBEDDED {
INLINE(void set_call_object(Object* target));
INLINE(Object** call_object_address());
+ template<typename StaticVisitor> inline void Visit();
inline void Visit(ObjectVisitor* v);
// Patch the code with some other code.
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index e1d4489d4..ce8e98d6a 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -36,6 +36,7 @@
#include "global-handles.h"
#include "macro-assembler.h"
#include "natives.h"
+#include "objects-visiting.h"
#include "snapshot.h"
#include "stub-cache.h"
@@ -56,7 +57,7 @@ class SourceCodeCache BASE_EMBEDDED {
}
void Iterate(ObjectVisitor* v) {
- v->VisitPointer(BitCast<Object**, FixedArray**>(&cache_));
+ v->VisitPointer(BitCast<Object**>(&cache_));
}
@@ -470,6 +471,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction() {
Handle<Code> code =
Handle<Code>(Builtins::builtin(Builtins::EmptyFunction));
empty_function->set_code(*code);
+ empty_function->shared()->set_code(*code);
Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
Handle<Script> script = Factory::NewScript(source);
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
@@ -812,9 +814,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
initial_map->set_instance_size(
initial_map->instance_size() + 5 * kPointerSize);
initial_map->set_instance_descriptors(*descriptors);
- initial_map->set_scavenger(
- Heap::GetScavenger(initial_map->instance_type(),
- initial_map->instance_size()));
+ initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
}
{ // -- J S O N
@@ -1234,6 +1234,14 @@ bool Genesis::InstallNatives() {
InstallNativeFunctions();
+ // Store the map for the string prototype after the natives have been compiled
+ // and the String function has been set up.
+ Handle<JSFunction> string_function(global_context()->string_function());
+ ASSERT(JSObject::cast(
+ string_function->initial_map()->prototype())->HasFastProperties());
+ global_context()->set_string_function_prototype_map(
+ HeapObject::cast(string_function->initial_map()->prototype())->map());
+
InstallCustomCallGenerators();
// Install Function.prototype.call and apply.
@@ -1545,6 +1553,8 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
Handle<SharedFunctionInfo> shared
= Handle<SharedFunctionInfo>(function->shared());
if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
+ // Set the code object on the function object.
+ function->set_code(function->shared()->code());
builtins->set_javascript_builtin_code(id, shared->code());
}
return true;
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index 3dcab627b..375e8f3f8 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -69,6 +69,7 @@ enum BuiltinExtraArguments {
V(JSConstructStubApi, BUILTIN, UNINITIALIZED) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED) \
+ V(LazyCompile, BUILTIN, UNINITIALIZED) \
\
V(LoadIC_Miss, BUILTIN, UNINITIALIZED) \
V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED) \
@@ -249,6 +250,7 @@ class Builtins : public AllStatic {
static void Generate_JSConstructStubApi(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
+ static void Generate_LazyCompile(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
static void Generate_FunctionCall(MacroAssembler* masm);
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index 13374d86d..5ea59920a 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -280,14 +280,13 @@ template <int> class StaticAssertionHelper { };
// The ASSERT macro is equivalent to CHECK except that it only
-// generates code in debug builds. Ditto STATIC_ASSERT.
+// generates code in debug builds.
#ifdef DEBUG
#define ASSERT_RESULT(expr) CHECK(expr)
#define ASSERT(condition) CHECK(condition)
#define ASSERT_EQ(v1, v2) CHECK_EQ(v1, v2)
#define ASSERT_NE(v1, v2) CHECK_NE(v1, v2)
#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
-#define STATIC_ASSERT(test) STATIC_CHECK(test)
#define SLOW_ASSERT(condition) if (FLAG_enable_slow_asserts) CHECK(condition)
#else
#define ASSERT_RESULT(expr) (expr)
@@ -295,9 +294,14 @@ template <int> class StaticAssertionHelper { };
#define ASSERT_EQ(v1, v2) ((void) 0)
#define ASSERT_NE(v1, v2) ((void) 0)
#define ASSERT_GE(v1, v2) ((void) 0)
-#define STATIC_ASSERT(test) ((void) 0)
#define SLOW_ASSERT(condition) ((void) 0)
#endif
+// Static asserts have no impact on runtime performance, so they can be
+// safely enabled in release mode. Moreover, the ((void) 0) expression
+// obeys different syntax rules than a typedef, e.g. it cannot appear
+// inside a class declaration, which leads to inconsistent behaviour
+// between debug and release compilation modes.
+#define STATIC_ASSERT(test) STATIC_CHECK(test)
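STATIC_CHECK turns a boolean constant into a type so that a false condition fails at compile time. The general shape of the technique (a sketch; V8's actual macro adds line-number mangling to keep the typedef names unique):

    template <bool> class StaticAssertion;           // only true is defined
    template <> class StaticAssertion<true> { };

    // Instantiating StaticAssertion<false> is a compile error:
    typedef StaticAssertion<(sizeof(int) >= 4)> IntIsAtLeast32Bits;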
#define ASSERT_TAG_ALIGNED(address) \
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 444698c53..a9fab43f3 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -77,14 +77,23 @@ void CodeGenerator::ProcessDeferred() {
// Generate the code.
Comment cmnt(masm_, code->comment());
masm_->bind(code->entry_label());
- code->SaveRegisters();
+ if (code->AutoSaveAndRestore()) {
+ code->SaveRegisters();
+ }
code->Generate();
- code->RestoreRegisters();
- masm_->jmp(code->exit_label());
+ if (code->AutoSaveAndRestore()) {
+ code->RestoreRegisters();
+ code->Exit();
+ }
}
}
+void DeferredCode::Exit() {
+ masm_->jmp(exit_label());
+}
+
+
void CodeGenerator::SetFrame(VirtualFrame* new_frame,
RegisterFile* non_frame_registers) {
RegisterFile saved_counts;
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 2a6ad6435..3b31c04f9 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -101,7 +101,8 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
F(IsObject, 1, 1) \
F(IsFunction, 1, 1) \
F(IsUndetectableObject, 1, 1) \
- F(IsSpecObject, 1, 1) \
+ F(IsSpecObject, 1, 1) \
+ F(IsStringWrapperSafeForDefaultValueOf, 1, 1) \
F(StringAdd, 2, 1) \
F(SubString, 3, 1) \
F(StringCompare, 2, 1) \
@@ -319,6 +320,15 @@ class DeferredCode: public ZoneObject {
void SaveRegisters();
void RestoreRegisters();
+ void Exit();
+
+ // If this returns true then all registers will be saved for the duration
+ // of the Generate() call. Otherwise the registers are not saved and the
+ // Generate() call must bracket any runtime calls with calls to
+ // SaveRegisters() and RestoreRegisters(). In this case the Generate
+ // method must also call Exit() in order to return to the non-deferred
+ // code.
+ virtual bool AutoSaveAndRestore() { return true; }
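A deferred-code class opting out of the automatic behaviour would take this shape (a hypothetical subclass, shown only to illustrate the new contract; Runtime::kSomeFunction is a placeholder):

    class DeferredExample : public DeferredCode {
     public:
      virtual bool AutoSaveAndRestore() { return false; }
      virtual void Generate() {
        // ... fast-path work that must not see saved registers ...
        SaveRegisters();                            // bracket the runtime call
        __ CallRuntime(Runtime::kSomeFunction, 0);  // placeholder runtime id
        RestoreRegisters();
        Exit();                                     // explicit return to non-deferred code
      }
    };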
protected:
MacroAssembler* masm_;
@@ -721,18 +731,6 @@ class CallFunctionStub: public CodeStub {
};
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
enum StringIndexFlags {
// Accepts smis or heap numbers.
STRING_INDEX_IS_NUMBER,
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index d87d9da89..9f0162ea7 100755
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -33,7 +33,6 @@
#include "compiler.h"
#include "data-flow.h"
#include "debug.h"
-#include "fast-codegen.h"
#include "flow-graph.h"
#include "full-codegen.h"
#include "liveedit.h"
@@ -120,14 +119,9 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
//
// --full-compiler enables the dedicated backend for code we expect to be
// run once
- // --fast-compiler enables a speculative optimizing backend (for
- // non-run-once code)
//
// The normal choice of backend can be overridden with the flags
- // --always-full-compiler and --always-fast-compiler, which are mutually
- // incompatible.
- CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
-
+ // --always-full-compiler.
Handle<SharedFunctionInfo> shared = info->shared_info();
bool is_run_once = (shared.is_null())
? info->scope()->is_global_scope()
@@ -141,13 +135,6 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
if (checker.has_supported_syntax()) {
return FullCodeGenerator::MakeCode(info);
}
- } else if (FLAG_always_fast_compiler ||
- (FLAG_fast_compiler && !is_run_once)) {
- FastCodeGenSyntaxChecker checker;
- checker.Check(info);
- if (checker.has_supported_syntax()) {
- return FastCodeGenerator::MakeCode(info);
- }
}
return CodeGenerator::MakeCode(info);
@@ -494,7 +481,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// Generate code
Handle<Code> code;
if (FLAG_lazy && allow_lazy) {
- code = ComputeLazyCompile(literal->num_parameters());
+ code = Handle<Code>(Builtins::builtin(Builtins::LazyCompile));
} else {
// The bodies of function literals have not yet been visited by
// the AST optimizer/analyzer.
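Every lazily compiled function now starts out pointing at the single shared LazyCompile builtin rather than a per-arity trampoline from ComputeLazyCompile. Conceptually (a sketch, not the actual ARM builtin):

    Code* LazyCompileTrampoline(JSFunction* function) {
      Code* code = CompileNow(function);  // Runtime::kLazyCompile under the hood
      function->set_code(code);           // later calls bypass the trampoline
      return code;                        // caller tail-calls into real code
    }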
@@ -528,7 +515,6 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// the static helper function MakeCode.
CompilationInfo info(literal, script, false);
- CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
bool is_run_once = literal->try_full_codegen();
bool is_compiled = false;
@@ -542,16 +528,6 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
code = FullCodeGenerator::MakeCode(&info);
is_compiled = true;
}
- } else if (FLAG_always_fast_compiler ||
- (FLAG_fast_compiler && !is_run_once)) {
- // Since we are not lazily compiling we do not have a receiver to
- // specialize for.
- FastCodeGenSyntaxChecker checker;
- checker.Check(&info);
- if (checker.has_supported_syntax()) {
- code = FastCodeGenerator::MakeCode(&info);
- is_compiled = true;
- }
}
if (!is_compiled) {
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index ade21f574..ed26603f4 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -41,37 +41,6 @@ namespace internal {
// is constructed based on the resources available at compile-time.
class CompilationInfo BASE_EMBEDDED {
public:
- // Compilation mode. Either the compiler is used as the primary
- // compiler and needs to setup everything or the compiler is used as
- // the secondary compiler for split compilation and has to handle
- // bailouts.
- enum Mode {
- PRIMARY,
- SECONDARY
- };
-
- // A description of the compilation state at a bailout to the secondary
- // code generator.
- //
- // The state is currently simple: there are no parameters or local
- // variables to worry about ('this' can be found in the stack frame).
- // There are at most two live values.
- //
- // There is a label that should be bound to the beginning of the bailout
- // stub code.
- class Bailout : public ZoneObject {
- public:
- Bailout(Register left, Register right) : left_(left), right_(right) {}
-
- Label* label() { return &label_; }
-
- private:
- Register left_;
- Register right_;
- Label label_;
- };
-
-
// Lazy compilation of a JSFunction.
CompilationInfo(Handle<JSFunction> closure,
int loop_nesting,
@@ -145,12 +114,6 @@ class CompilationInfo BASE_EMBEDDED {
int loop_nesting() { return loop_nesting_; }
bool has_receiver() { return !receiver_.is_null(); }
Handle<Object> receiver() { return receiver_; }
- List<Bailout*>* bailouts() { return &bailouts_; }
-
- // Accessors for mutable fields (possibly set by analysis passes) with
- // default values given by Initialize.
- Mode mode() { return mode_; }
- void set_mode(Mode mode) { mode_ = mode; }
bool has_this_properties() { return has_this_properties_; }
void set_has_this_properties(bool flag) { has_this_properties_ = flag; }
@@ -169,19 +132,8 @@ class CompilationInfo BASE_EMBEDDED {
// Derived accessors.
Scope* scope() { return function()->scope(); }
- // Add a bailout with two live values.
- Label* AddBailout(Register left, Register right) {
- Bailout* bailout = new Bailout(left, right);
- bailouts_.Add(bailout);
- return bailout->label();
- }
-
- // Add a bailout with no live values.
- Label* AddBailout() { return AddBailout(no_reg, no_reg); }
-
private:
void Initialize() {
- mode_ = PRIMARY;
has_this_properties_ = false;
has_globals_ = false;
}
@@ -191,7 +143,6 @@ class CompilationInfo BASE_EMBEDDED {
Handle<Script> script_;
FunctionLiteral* function_;
- Mode mode_;
bool is_eval_;
int loop_nesting_;
@@ -201,10 +152,6 @@ class CompilationInfo BASE_EMBEDDED {
bool has_this_properties_;
bool has_globals_;
- // An ordered list of bailout points encountered during fast-path
- // compilation.
- List<Bailout*> bailouts_;
-
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 01bb21b5f..d1c98bd95 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -56,6 +56,7 @@ enum ContextLookupFlags {
V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
+ V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
@@ -186,6 +187,7 @@ class Context: public FixedArray {
BOOLEAN_FUNCTION_INDEX,
NUMBER_FUNCTION_INDEX,
STRING_FUNCTION_INDEX,
+ STRING_FUNCTION_PROTOTYPE_MAP_INDEX,
OBJECT_FUNCTION_INDEX,
ARRAY_FUNCTION_INDEX,
DATE_FUNCTION_INDEX,
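Each V(index, type, name) entry in this macro list pairs a context slot with a typed accessor named after the third argument, so the new map should be reachable roughly as follows (a sketch inferred from the macro pattern, not quoted from the source):

    // Sketch: macro-generated accessor for the new slot, given a
    // Context* that refers to a global context.
    Map* proto_map = global_context->string_function_prototype_map();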
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index c8d29f8cf..3e554cceb 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -476,7 +476,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
const double actual_sampling_rate = generator_->actual_sampling_rate();
- StopProcessorIfLastProfile();
+ StopProcessorIfLastProfile(title);
CpuProfile* result =
profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
title,
@@ -491,14 +491,15 @@ CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
String* title) {
const double actual_sampling_rate = generator_->actual_sampling_rate();
- StopProcessorIfLastProfile();
+ const char* profile_title = profiles_->GetName(title);
+ StopProcessorIfLastProfile(profile_title);
int token = token_enumerator_->GetTokenId(security_token);
- return profiles_->StopProfiling(token, title, actual_sampling_rate);
+ return profiles_->StopProfiling(token, profile_title, actual_sampling_rate);
}
-void CpuProfiler::StopProcessorIfLastProfile() {
- if (profiles_->is_last_profile()) {
+void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
+ if (profiles_->IsLastProfile(title)) {
reinterpret_cast<Sampler*>(Logger::ticker_)->Stop();
processor_->Stop();
processor_->Join();
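Both StopCollectingProfile overloads now resolve a const char* title before deciding whether to shut the processor down, so stopping is keyed to a specific profile name instead of a global last-profile flag. Their shared shape, sketched (not verbatim):

    // Sketch: common sequence of the two overloads above.
    const char* profile_title = /* literal, or profiles_->GetName(title) */;
    StopProcessorIfLastProfile(profile_title);
    return profiles_->StopProfiling(token, profile_title,
                                    actual_sampling_rate);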
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index 03b817648..4d5559e4f 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -260,7 +260,7 @@ class CpuProfiler {
void StartProcessorIfNotStarted();
CpuProfile* StopCollectingProfile(const char* title);
CpuProfile* StopCollectingProfile(Object* security_token, String* title);
- void StopProcessorIfLastProfile();
+ void StopProcessorIfLastProfile(const char* title);
CpuProfilesCollection* profiles_;
unsigned next_profile_uid_;
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 5d386cc04..c13c8c987 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -582,6 +582,35 @@ int Debug::ArchiveSpacePerThread() {
}
+// Frame structure (conforms to the InternalFrame structure):
+// -- code
+//    -- SMI marker
+// -- function (slot is called "context")
+// -- frame base
+Object** Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
+ Handle<Code> code) {
+ ASSERT(bottom_js_frame->is_java_script());
+
+ Address fp = bottom_js_frame->fp();
+
+ // Move function pointer into "context" slot.
+ Memory::Object_at(fp + StandardFrameConstants::kContextOffset) =
+ Memory::Object_at(fp + JavaScriptFrameConstants::kFunctionOffset);
+
+ Memory::Object_at(fp + InternalFrameConstants::kCodeOffset) = *code;
+ Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset) =
+ Smi::FromInt(StackFrame::INTERNAL);
+
+ return reinterpret_cast<Object**>(&Memory::Object_at(
+ fp + StandardFrameConstants::kContextOffset));
+}
+
+const int Debug::kFrameDropperFrameSize = 4;
+
+
+
+
+
// Default break enabled.
bool Debug::disable_break_ = false;
@@ -852,8 +881,8 @@ void Debug::PreemptionWhileInDebugger() {
void Debug::Iterate(ObjectVisitor* v) {
- v->VisitPointer(BitCast<Object**, Code**>(&(debug_break_return_)));
- v->VisitPointer(BitCast<Object**, Code**>(&(debug_break_slot_)));
+ v->VisitPointer(BitCast<Object**>(&(debug_break_return_)));
+ v->VisitPointer(BitCast<Object**>(&(debug_break_slot_)));
}
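Read together with kFrameDropperFrameSize = 4, the synthetic frame assembled by SetUpFrameDropperFrame can be pictured as follows (slot names taken from the constants used in the hunk; the layout is a sketch, not authoritative):

    //   fp + kCodeOffset    : frame-dropper Code object
    //   fp + kMarkerOffset  : Smi::FromInt(StackFrame::INTERNAL)
    //   fp + kContextOffset : the JSFunction, doubling as the
    //                         restarter_frame_function_pointer slot
    //   fp                  : frame base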
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index b6aba5aad..98d191942 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -400,6 +400,11 @@ class Debug {
static void GenerateStubNoRegistersDebugBreak(MacroAssembler* masm);
static void GenerateSlotDebugBreak(MacroAssembler* masm);
static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
+
+  // FrameDropper is a code replacement for a JavaScript frame, possibly
+  // with several frames above it.
+  // There are no calling conventions here, because it never actually gets
+  // called; it only gets returned to.
static void GenerateFrameDropperLiveEdit(MacroAssembler* masm);
// Called from stub-cache.cc.
@@ -431,13 +436,14 @@ class Debug {
// the value that is called 'restarter_frame_function_pointer'. The value
// at this address (possibly updated by GC) may be used later when preparing
// 'step in' operation.
- // The implementation is architecture-specific.
- // TODO(LiveEdit): consider reviewing it as architecture-independent.
static Object** SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
Handle<Code> code);
static const int kFrameDropperFrameSize;
+ // Architecture-specific constant.
+ static const bool kFrameDropperSupported;
+
private:
static bool CompileDebuggerScript(int index);
static void ClearOneShot();
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index d65338385..7c8c93404 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -32,6 +32,7 @@
#include "execution.h"
#include "factory.h"
#include "macro-assembler.h"
+#include "objects-visiting.h"
namespace v8 {
namespace internal {
@@ -277,8 +278,7 @@ Handle<Map> Factory::CopyMap(Handle<Map> src,
copy->set_inobject_properties(inobject_properties);
copy->set_unused_property_fields(inobject_properties);
copy->set_instance_size(copy->instance_size() + instance_size_delta);
- copy->set_scavenger(Heap::GetScavenger(copy->instance_type(),
- copy->instance_size()));
+ copy->set_visitor_id(StaticVisitorBase::GetVisitorId(*copy));
return copy;
}
@@ -486,6 +486,10 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name,
bool force_initial_map) {
// Allocate the function
Handle<JSFunction> function = NewFunction(name, the_hole_value());
+
+  // Set up the code pointer in both the shared function info and in
+ // the function itself.
+ function->shared()->set_code(*code);
function->set_code(*code);
if (force_initial_map ||
@@ -511,9 +515,12 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
Handle<JSObject> prototype,
Handle<Code> code,
bool force_initial_map) {
- // Allocate the function
+ // Allocate the function.
Handle<JSFunction> function = NewFunction(name, prototype);
+  // Set up the code pointer in both the shared function info and in
+ // the function itself.
+ function->shared()->set_code(*code);
function->set_code(*code);
if (force_initial_map ||
@@ -535,6 +542,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
Handle<Code> code) {
Handle<JSFunction> function = NewFunctionWithoutPrototype(name);
+ function->shared()->set_code(*code);
function->set_code(*code);
ASSERT(!function->has_initial_map());
ASSERT(!function->has_prototype());
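All three factory paths now mirror the code pointer into the SharedFunctionInfo at creation time. This is the invariant that lets the reworked CompileLazy in handles.cc below reuse shared()->code() instead of recompiling; as a one-line sketch:

    // Sketch: invariant established by the three hunks above.
    ASSERT(function->code() == function->shared()->code());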
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 22511121c..c014986f4 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -329,7 +329,7 @@ class Factory : public AllStatic {
#define ROOT_ACCESSOR(type, name, camel_name) \
static inline Handle<type> name() { \
- return Handle<type>(BitCast<type**, Object**>( \
+ return Handle<type>(BitCast<type**>( \
&Heap::roots_[Heap::k##camel_name##RootIndex])); \
}
ROOT_LIST(ROOT_ACCESSOR)
@@ -337,7 +337,7 @@ class Factory : public AllStatic {
#define SYMBOL_ACCESSOR(name, str) \
static inline Handle<String> name() { \
- return Handle<String>(BitCast<String**, Object**>( \
+ return Handle<String>(BitCast<String**>( \
&Heap::roots_[Heap::k##name##RootIndex])); \
}
SYMBOL_LIST(SYMBOL_ACCESSOR)
diff --git a/deps/v8/src/fast-codegen.cc b/deps/v8/src/fast-codegen.cc
deleted file mode 100644
index 832cf7465..000000000
--- a/deps/v8/src/fast-codegen.cc
+++ /dev/null
@@ -1,746 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "data-flow.h"
-#include "fast-codegen.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-#define BAILOUT(reason) \
- do { \
- if (FLAG_trace_bailout) { \
- PrintF("%s\n", reason); \
- } \
- has_supported_syntax_ = false; \
- return; \
- } while (false)
-
-
-#define CHECK_BAILOUT \
- do { \
- if (!has_supported_syntax_) return; \
- } while (false)
-
-
-void FastCodeGenSyntaxChecker::Check(CompilationInfo* info) {
- info_ = info;
-
- // We do not specialize if we do not have a receiver or if it is not a
- // JS object with fast mode properties.
- if (!info->has_receiver()) BAILOUT("No receiver");
- if (!info->receiver()->IsJSObject()) BAILOUT("Receiver is not an object");
- Handle<JSObject> object = Handle<JSObject>::cast(info->receiver());
- if (!object->HasFastProperties()) BAILOUT("Receiver is in dictionary mode");
-
- // We do not support stack or heap slots (both of which require
- // allocation).
- Scope* scope = info->scope();
- if (scope->num_stack_slots() > 0) {
- BAILOUT("Function has stack-allocated locals");
- }
- if (scope->num_heap_slots() > 0) {
- BAILOUT("Function has context-allocated locals");
- }
-
- VisitDeclarations(scope->declarations());
- CHECK_BAILOUT;
-
- // We do not support empty function bodies.
- if (info->function()->body()->is_empty()) {
- BAILOUT("Function has an empty body");
- }
- VisitStatements(info->function()->body());
-}
-
-
-void FastCodeGenSyntaxChecker::VisitDeclarations(
- ZoneList<Declaration*>* decls) {
- if (!decls->is_empty()) BAILOUT("Function has declarations");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitStatements(ZoneList<Statement*>* stmts) {
- if (stmts->length() != 1) {
- BAILOUT("Function body is not a singleton statement.");
- }
- Visit(stmts->at(0));
-}
-
-
-void FastCodeGenSyntaxChecker::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenSyntaxChecker::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void FastCodeGenSyntaxChecker::VisitExpressionStatement(
- ExpressionStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void FastCodeGenSyntaxChecker::VisitEmptyStatement(EmptyStatement* stmt) {
- // Supported.
-}
-
-
-void FastCodeGenSyntaxChecker::VisitIfStatement(IfStatement* stmt) {
- BAILOUT("IfStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitContinueStatement(ContinueStatement* stmt) {
- BAILOUT("Continuestatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitBreakStatement(BreakStatement* stmt) {
- BAILOUT("BreakStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitReturnStatement(ReturnStatement* stmt) {
- BAILOUT("ReturnStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitWithEnterStatement(
- WithEnterStatement* stmt) {
- BAILOUT("WithEnterStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitWithExitStatement(WithExitStatement* stmt) {
- BAILOUT("WithExitStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitSwitchStatement(SwitchStatement* stmt) {
- BAILOUT("SwitchStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitDoWhileStatement(DoWhileStatement* stmt) {
- BAILOUT("DoWhileStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitWhileStatement(WhileStatement* stmt) {
- BAILOUT("WhileStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitForStatement(ForStatement* stmt) {
- BAILOUT("ForStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitForInStatement(ForInStatement* stmt) {
- BAILOUT("ForInStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitTryCatchStatement(TryCatchStatement* stmt) {
- BAILOUT("TryCatchStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitTryFinallyStatement(
- TryFinallyStatement* stmt) {
- BAILOUT("TryFinallyStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitDebuggerStatement(
- DebuggerStatement* stmt) {
- BAILOUT("DebuggerStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
- BAILOUT("FunctionLiteral");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- BAILOUT("SharedFunctionInfoLiteral");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitConditional(Conditional* expr) {
- BAILOUT("Conditional");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitSlot(Slot* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) {
- // Only global variable references are supported.
- Variable* var = expr->var();
- if (!var->is_global() || var->is_this()) BAILOUT("Non-global variable");
-
- // Check if the global variable is existing and non-deletable.
- if (info()->has_global_object()) {
- LookupResult lookup;
- info()->global_object()->Lookup(*expr->name(), &lookup);
- if (!lookup.IsProperty()) {
- BAILOUT("Non-existing global variable");
- }
- // We do not handle global variables with accessors or interceptors.
- if (lookup.type() != NORMAL) {
- BAILOUT("Global variable with accessors or interceptors.");
- }
- // We do not handle deletable global variables.
- if (!lookup.IsDontDelete()) {
- BAILOUT("Deletable global variable");
- }
- }
-}
-
-
-void FastCodeGenSyntaxChecker::VisitLiteral(Literal* expr) {
- BAILOUT("Literal");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitRegExpLiteral(RegExpLiteral* expr) {
- BAILOUT("RegExpLiteral");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitObjectLiteral(ObjectLiteral* expr) {
- BAILOUT("ObjectLiteral");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitArrayLiteral(ArrayLiteral* expr) {
- BAILOUT("ArrayLiteral");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
- BAILOUT("CatchExtensionObject");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) {
- // Simple assignments to (named) this properties are supported.
- if (expr->op() != Token::ASSIGN) BAILOUT("Non-simple assignment");
-
- Property* prop = expr->target()->AsProperty();
- if (prop == NULL) BAILOUT("Non-property assignment");
- VariableProxy* proxy = prop->obj()->AsVariableProxy();
- if (proxy == NULL || !proxy->var()->is_this()) {
- BAILOUT("Non-this-property assignment");
- }
- if (!prop->key()->IsPropertyName()) {
- BAILOUT("Non-named-property assignment");
- }
-
- // We will only specialize for fields on the object itself.
- // Expression::IsPropertyName implies that the name is a literal
- // symbol but we do not assume that.
- Literal* key = prop->key()->AsLiteral();
- if (key != NULL && key->handle()->IsString()) {
- Handle<Object> receiver = info()->receiver();
- Handle<String> name = Handle<String>::cast(key->handle());
- LookupResult lookup;
- receiver->Lookup(*name, &lookup);
- if (!lookup.IsProperty()) {
- BAILOUT("Assigned property not found at compile time");
- }
- if (lookup.holder() != *receiver) BAILOUT("Non-own property assignment");
- if (!lookup.type() == FIELD) BAILOUT("Non-field property assignment");
- } else {
- UNREACHABLE();
- BAILOUT("Unexpected non-string-literal property key");
- }
-
- Visit(expr->value());
-}
-
-
-void FastCodeGenSyntaxChecker::VisitThrow(Throw* expr) {
- BAILOUT("Throw");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitProperty(Property* expr) {
- // We support named this property references.
- VariableProxy* proxy = expr->obj()->AsVariableProxy();
- if (proxy == NULL || !proxy->var()->is_this()) {
- BAILOUT("Non-this-property reference");
- }
- if (!expr->key()->IsPropertyName()) {
- BAILOUT("Non-named-property reference");
- }
-
- // We will only specialize for fields on the object itself.
- // Expression::IsPropertyName implies that the name is a literal
- // symbol but we do not assume that.
- Literal* key = expr->key()->AsLiteral();
- if (key != NULL && key->handle()->IsString()) {
- Handle<Object> receiver = info()->receiver();
- Handle<String> name = Handle<String>::cast(key->handle());
- LookupResult lookup;
- receiver->Lookup(*name, &lookup);
- if (!lookup.IsProperty()) {
- BAILOUT("Referenced property not found at compile time");
- }
- if (lookup.holder() != *receiver) BAILOUT("Non-own property reference");
- if (!lookup.type() == FIELD) BAILOUT("Non-field property reference");
- } else {
- UNREACHABLE();
- BAILOUT("Unexpected non-string-literal property key");
- }
-}
-
-
-void FastCodeGenSyntaxChecker::VisitCall(Call* expr) {
- BAILOUT("Call");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitCallNew(CallNew* expr) {
- BAILOUT("CallNew");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitCallRuntime(CallRuntime* expr) {
- BAILOUT("CallRuntime");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitUnaryOperation(UnaryOperation* expr) {
- BAILOUT("UnaryOperation");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitCountOperation(CountOperation* expr) {
- BAILOUT("CountOperation");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) {
- // We support bitwise OR.
- switch (expr->op()) {
- case Token::COMMA:
- BAILOUT("BinaryOperation COMMA");
- case Token::OR:
- BAILOUT("BinaryOperation OR");
- case Token::AND:
- BAILOUT("BinaryOperation AND");
-
- case Token::BIT_OR:
- // We support expressions nested on the left because they only require
- // a pair of registers to keep all intermediate values in registers
- // (i.e., the expression stack has height no more than two).
- if (!expr->right()->IsLeaf()) BAILOUT("expression nested on right");
-
- // We do not allow subexpressions with side effects because we
- // (currently) bail out to the beginning of the full function. The
- // only expressions with side effects that we would otherwise handle
- // are assignments.
- if (expr->left()->AsAssignment() != NULL ||
- expr->right()->AsAssignment() != NULL) {
- BAILOUT("subexpression of binary operation has side effects");
- }
-
- Visit(expr->left());
- CHECK_BAILOUT;
- Visit(expr->right());
- break;
-
- case Token::BIT_XOR:
- BAILOUT("BinaryOperation BIT_XOR");
- case Token::BIT_AND:
- BAILOUT("BinaryOperation BIT_AND");
- case Token::SHL:
- BAILOUT("BinaryOperation SHL");
- case Token::SAR:
- BAILOUT("BinaryOperation SAR");
- case Token::SHR:
- BAILOUT("BinaryOperation SHR");
- case Token::ADD:
- BAILOUT("BinaryOperation ADD");
- case Token::SUB:
- BAILOUT("BinaryOperation SUB");
- case Token::MUL:
- BAILOUT("BinaryOperation MUL");
- case Token::DIV:
- BAILOUT("BinaryOperation DIV");
- case Token::MOD:
- BAILOUT("BinaryOperation MOD");
- default:
- UNREACHABLE();
- }
-}
-
-
-void FastCodeGenSyntaxChecker::VisitCompareOperation(CompareOperation* expr) {
- BAILOUT("CompareOperation");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
- BAILOUT("ThisFunction");
-}
-
-#undef BAILOUT
-#undef CHECK_BAILOUT
-
-
-#define __ ACCESS_MASM(masm())
-
-Handle<Code> FastCodeGenerator::MakeCode(CompilationInfo* info) {
- // Label the AST before calling MakeCodePrologue, so AST node numbers are
- // printed with the AST.
- AstLabeler labeler;
- labeler.Label(info);
-
- CodeGenerator::MakeCodePrologue(info);
-
- const int kInitialBufferSize = 4 * KB;
- MacroAssembler masm(NULL, kInitialBufferSize);
-
- // Generate the fast-path code.
- FastCodeGenerator fast_cgen(&masm);
- fast_cgen.Generate(info);
- if (fast_cgen.HasStackOverflow()) {
- ASSERT(!Top::has_pending_exception());
- return Handle<Code>::null();
- }
-
- // Generate the full code for the function in bailout mode, using the same
- // macro assembler.
- CodeGenerator cgen(&masm);
- CodeGeneratorScope scope(&cgen);
- info->set_mode(CompilationInfo::SECONDARY);
- cgen.Generate(info);
- if (cgen.HasStackOverflow()) {
- ASSERT(!Top::has_pending_exception());
- return Handle<Code>::null();
- }
-
- Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
- return CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
-}
-
-
-void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
- // Nothing to do.
-}
-
-
-void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitConditional(Conditional* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitSlot(Slot* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- ASSERT(expr->var()->is_global() && !expr->var()->is_this());
- // Check if we can compile a global variable load directly from the cell.
- ASSERT(info()->has_global_object());
- LookupResult lookup;
- info()->global_object()->Lookup(*expr->name(), &lookup);
- // We only support normal (non-accessor/interceptor) DontDelete properties
- // for now.
- ASSERT(lookup.IsProperty());
- ASSERT_EQ(NORMAL, lookup.type());
- ASSERT(lookup.IsDontDelete());
- Handle<Object> cell(info()->global_object()->GetPropertyCell(&lookup));
-
- // Global variable lookups do not have side effects, so we do not need to
- // emit code if we are in an effect context.
- if (!destination().is(no_reg)) {
- Comment cmnt(masm(), ";; Global");
- if (FLAG_print_ir) {
- SmartPointer<char> name = expr->name()->ToCString();
- PrintF("%d: t%d = Global(%s)\n", expr->num(),
- expr->num(), *name);
- }
- EmitGlobalVariableLoad(cell);
- }
-}
-
-
-void FastCodeGenerator::VisitLiteral(Literal* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitAssignment(Assignment* expr) {
- // Known to be a simple this property assignment. Effectively a unary
- // operation.
- { Register my_destination = destination();
- set_destination(accumulator0());
- Visit(expr->value());
- set_destination(my_destination);
- }
-
- Property* prop = expr->target()->AsProperty();
- ASSERT_NOT_NULL(prop);
- ASSERT_NOT_NULL(prop->obj()->AsVariableProxy());
- ASSERT(prop->obj()->AsVariableProxy()->var()->is_this());
- ASSERT(prop->key()->IsPropertyName());
- Handle<String> name =
- Handle<String>::cast(prop->key()->AsLiteral()->handle());
-
- Comment cmnt(masm(), ";; Store to this");
- if (FLAG_print_ir) {
- SmartPointer<char> name_string = name->ToCString();
- PrintF("%d: ", expr->num());
- if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
- PrintF("Store(this, \"%s\", t%d)\n", *name_string,
- expr->value()->num());
- }
-
- EmitThisPropertyStore(name);
-}
-
-
-void FastCodeGenerator::VisitThrow(Throw* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitProperty(Property* expr) {
- ASSERT_NOT_NULL(expr->obj()->AsVariableProxy());
- ASSERT(expr->obj()->AsVariableProxy()->var()->is_this());
- ASSERT(expr->key()->IsPropertyName());
- if (!destination().is(no_reg)) {
- Handle<String> name =
- Handle<String>::cast(expr->key()->AsLiteral()->handle());
-
- Comment cmnt(masm(), ";; Load from this");
- if (FLAG_print_ir) {
- SmartPointer<char> name_string = name->ToCString();
- PrintF("%d: t%d = Load(this, \"%s\")\n",
- expr->num(), expr->num(), *name_string);
- }
- EmitThisPropertyLoad(name);
- }
-}
-
-
-void FastCodeGenerator::VisitCall(Call* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- // We support limited binary operations: bitwise OR only allowed to be
- // nested on the left.
- ASSERT(expr->op() == Token::BIT_OR);
- ASSERT(expr->right()->IsLeaf());
-
- { Register my_destination = destination();
- set_destination(accumulator1());
- Visit(expr->left());
- set_destination(accumulator0());
- Visit(expr->right());
- set_destination(my_destination);
- }
-
- Comment cmnt(masm(), ";; BIT_OR");
- if (FLAG_print_ir) {
- PrintF("%d: ", expr->num());
- if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
- PrintF("BIT_OR(t%d, t%d)\n", expr->left()->num(), expr->right()->num());
- }
- EmitBitOr();
-}
-
-
-void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- UNREACHABLE();
-}
-
-#undef __
-
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/fast-codegen.h b/deps/v8/src/fast-codegen.h
deleted file mode 100644
index a0282bbc4..000000000
--- a/deps/v8/src/fast-codegen.h
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FAST_CODEGEN_H_
-#define V8_FAST_CODEGEN_H_
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/fast-codegen-ia32.h"
-#else
-
-#include "v8.h"
-
-#include "ast.h"
-#include "compiler.h"
-#include "list.h"
-
-namespace v8 {
-namespace internal {
-
-class FastCodeGenSyntaxChecker: public AstVisitor {
- public:
- explicit FastCodeGenSyntaxChecker()
- : info_(NULL), has_supported_syntax_(true) {
- }
-
- void Check(CompilationInfo* info);
-
- CompilationInfo* info() { return info_; }
- bool has_supported_syntax() { return has_supported_syntax_; }
-
- private:
- void VisitDeclarations(ZoneList<Declaration*>* decls);
- void VisitStatements(ZoneList<Statement*>* stmts);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- CompilationInfo* info_;
- bool has_supported_syntax_;
-
- DISALLOW_COPY_AND_ASSIGN(FastCodeGenSyntaxChecker);
-};
-
-
-class FastCodeGenerator: public AstVisitor {
- public:
- explicit FastCodeGenerator(MacroAssembler* masm)
- : masm_(masm), info_(NULL), destination_(no_reg), smi_bits_(0) {
- }
-
- static Handle<Code> MakeCode(CompilationInfo* info);
-
- void Generate(CompilationInfo* compilation_info);
-
- private:
- MacroAssembler* masm() { return masm_; }
- CompilationInfo* info() { return info_; }
-
- Register destination() { return destination_; }
- void set_destination(Register reg) { destination_ = reg; }
-
- FunctionLiteral* function() { return info_->function(); }
- Scope* scope() { return info_->scope(); }
-
- // Platform-specific fixed registers, all guaranteed distinct.
- Register accumulator0();
- Register accumulator1();
- Register scratch0();
- Register scratch1();
- Register scratch2();
- Register receiver_reg();
- Register context_reg();
-
- Register other_accumulator(Register reg) {
- ASSERT(reg.is(accumulator0()) || reg.is(accumulator1()));
- return (reg.is(accumulator0())) ? accumulator1() : accumulator0();
- }
-
- // Flags are true if the respective register is statically known to hold a
- // smi. We do not track every register, only the accumulator registers.
- bool is_smi(Register reg) {
- ASSERT(!reg.is(no_reg));
- return (smi_bits_ & reg.bit()) != 0;
- }
- void set_as_smi(Register reg) {
- ASSERT(!reg.is(no_reg));
- smi_bits_ = smi_bits_ | reg.bit();
- }
- void clear_as_smi(Register reg) {
- ASSERT(!reg.is(no_reg));
- smi_bits_ = smi_bits_ & ~reg.bit();
- }
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- // Emit code to load the receiver from the stack into receiver_reg.
- void EmitLoadReceiver();
-
- // Emit code to load a global variable directly from a global property
- // cell into the destination register.
- void EmitGlobalVariableLoad(Handle<Object> cell);
-
- // Emit a store to an own property of this. The stored value is expected
- // in accumulator0 and the receiver in receiver_reg. The receiver
- // register is preserved and the result (the stored value) is left in the
- // destination register.
- void EmitThisPropertyStore(Handle<String> name);
-
- // Emit a load from an own property of this. The receiver is expected in
- // receiver_reg. The receiver register is preserved and the result is
- // left in the destination register.
- void EmitThisPropertyLoad(Handle<String> name);
-
- // Emit a bitwise or operation. The left operand is in accumulator1 and
- // the right is in accumulator0. The result should be left in the
- // destination register.
- void EmitBitOr();
-
- MacroAssembler* masm_;
- CompilationInfo* info_;
- Register destination_;
- uint32_t smi_bits_;
-
- DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
-
-#endif // V8_FAST_CODEGEN_H_
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 02e8f16e4..a143bcd6f 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -148,11 +148,8 @@ DEFINE_bool(strict, false, "strict error checking")
DEFINE_int(min_preparse_length, 1024,
"minimum length for automatic enable preparsing")
DEFINE_bool(full_compiler, true, "enable dedicated backend for run-once code")
-DEFINE_bool(fast_compiler, false, "enable speculative optimizing backend")
DEFINE_bool(always_full_compiler, false,
"try to use the dedicated run-once backend for all code")
-DEFINE_bool(always_fast_compiler, false,
- "try to use the speculative optimizing backend for all code")
DEFINE_bool(trace_bailout, false,
"print reasons for falling back to using the classic V8 backend")
DEFINE_bool(safe_int32_compiler, true,
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index a468f149f..e97ed7607 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -677,7 +677,7 @@ Handle<Code> FullCodeGenerator::MakeCode(CompilationInfo* info) {
MacroAssembler masm(NULL, kInitialBufferSize);
FullCodeGenerator cgen(&masm);
- cgen.Generate(info, PRIMARY);
+ cgen.Generate(info);
if (cgen.HasStackOverflow()) {
ASSERT(!Top::has_pending_exception());
return Handle<Code>::null();
@@ -919,6 +919,9 @@ void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
EmitGetFromCache(expr->arguments());
} else if (strcmp("_IsRegExpEquivalent", *name->ToCString()) == 0) {
EmitIsRegExpEquivalent(expr->arguments());
+ } else if (strcmp("_IsStringWrapperSafeForDefaultValueOf",
+ *name->ToCString()) == 0) {
+ EmitIsStringWrapperSafeForDefaultValueOf(expr->arguments());
} else {
UNREACHABLE();
}
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 6e2fecb6c..00f4c06e2 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -89,11 +89,6 @@ class BreakableStatementChecker: public AstVisitor {
class FullCodeGenerator: public AstVisitor {
public:
- enum Mode {
- PRIMARY,
- SECONDARY
- };
-
explicit FullCodeGenerator(MacroAssembler* masm)
: masm_(masm),
info_(NULL),
@@ -106,7 +101,7 @@ class FullCodeGenerator: public AstVisitor {
static Handle<Code> MakeCode(CompilationInfo* info);
- void Generate(CompilationInfo* info, Mode mode);
+ void Generate(CompilationInfo* info);
private:
class Breakable;
@@ -408,6 +403,8 @@ class FullCodeGenerator: public AstVisitor {
void EmitIsArray(ZoneList<Expression*>* arguments);
void EmitIsRegExp(ZoneList<Expression*>* arguments);
void EmitIsConstructCall(ZoneList<Expression*>* arguments);
+ void EmitIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* arguments);
void EmitObjectEquals(ZoneList<Expression*>* arguments);
void EmitArguments(ZoneList<Expression*>* arguments);
void EmitArgumentsLength(ZoneList<Expression*>* arguments);
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 030af7cc7..3fe9e240b 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -507,6 +507,31 @@ union DoubleRepresentation {
};
+// Unions used for customized checking of IEEE double types inlined
+// within the V8 runtime, rather than going through the underlying
+// platform headers and libraries.
+union IeeeDoubleLittleEndianArchType {
+ double d;
+ struct {
+ unsigned int man_low :32;
+ unsigned int man_high :20;
+ unsigned int exp :11;
+ unsigned int sign :1;
+ } bits;
+};
+
+
+union IeeeDoubleBigEndianArchType {
+ double d;
+ struct {
+ unsigned int sign :1;
+ unsigned int exp :11;
+ unsigned int man_high :20;
+ unsigned int man_low :32;
+ } bits;
+};
+
+
// AccessorCallback
struct AccessorDescriptor {
Object* (*getter)(Object* object, void* data);
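A small usage sketch for the new unions (my example, not from the patch). IEEE 754 doubles have a sign bit, an 11-bit exponent biased by 1023, and a 52-bit mantissa split here into 20 high and 32 low bits, so decoding 1.0 gives:

    IeeeDoubleLittleEndianArchType rep;
    rep.d = 1.0;
    // 1.0 == (-1)^0 * 2^(1023 - 1023) * 1.0
    ASSERT(rep.bits.sign == 0);
    ASSERT(rep.bits.exp == 1023);
    ASSERT(rep.bits.man_high == 0 && rep.bits.man_low == 0);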
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index 8478bb5cd..bf19f5f86 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -47,7 +47,7 @@ template <class T>
inline T* Handle<T>::operator*() const {
ASSERT(location_ != NULL);
ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue);
- return *location_;
+ return *BitCast<T**>(location_);
}
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 0d218cb97..7b76e923f 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -637,8 +637,8 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
// Check access rights if required.
if (current->IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(*current, Heap::undefined_value(),
- v8::ACCESS_KEYS)) {
+ !Top::MayNamedAccess(*current, Heap::undefined_value(),
+ v8::ACCESS_KEYS)) {
Top::ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
break;
}
@@ -771,20 +771,30 @@ bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
bool CompileLazy(Handle<JSFunction> function,
Handle<Object> receiver,
ClearExceptionFlag flag) {
- CompilationInfo info(function, 0, receiver);
- bool result = CompileLazyHelper(&info, flag);
- PROFILE(FunctionCreateEvent(*function));
- return result;
+ if (function->shared()->is_compiled()) {
+ function->set_code(function->shared()->code());
+ return true;
+ } else {
+ CompilationInfo info(function, 0, receiver);
+ bool result = CompileLazyHelper(&info, flag);
+ PROFILE(FunctionCreateEvent(*function));
+ return result;
+ }
}
bool CompileLazyInLoop(Handle<JSFunction> function,
Handle<Object> receiver,
ClearExceptionFlag flag) {
- CompilationInfo info(function, 1, receiver);
- bool result = CompileLazyHelper(&info, flag);
- PROFILE(FunctionCreateEvent(*function));
- return result;
+ if (function->shared()->is_compiled()) {
+ function->set_code(function->shared()->code());
+ return true;
+ } else {
+ CompilationInfo info(function, 1, receiver);
+ bool result = CompileLazyHelper(&info, flag);
+ PROFILE(FunctionCreateEvent(*function));
+ return result;
+ }
}
@@ -809,11 +819,6 @@ OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
}
-Handle<Code> ComputeLazyCompile(int argc) {
- CALL_HEAP_FUNCTION(StubCache::ComputeLazyCompile(argc), Code);
-}
-
-
OptimizedObjectForAddingMultipleProperties::
~OptimizedObjectForAddingMultipleProperties() {
// Reoptimize the object to allow fast property access.
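The two lazy-compile paths above differ only in the loop-nesting hint. A condensed sketch of the repeated pattern (not in the patch itself; CompileLazyHelper and the PROFILE hook as used in the hunk):

    static bool LazyCompile(Handle<JSFunction> function,
                            Handle<Object> receiver,
                            ClearExceptionFlag flag,
                            int loop_nesting) {
      // Reuse already-compiled code from the shared function info...
      if (function->shared()->is_compiled()) {
        function->set_code(function->shared()->code());
        return true;
      }
      // ...otherwise compile for real.
      CompilationInfo info(function, loop_nesting, receiver);
      bool result = CompileLazyHelper(&info, flag);
      PROFILE(FunctionCreateEvent(*function));
      return result;
    }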
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 1e14daf9a..135dbfb5b 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -353,9 +353,6 @@ bool CompileLazyInLoop(Handle<JSFunction> function,
Handle<Object> receiver,
ClearExceptionFlag flag);
-// Returns the lazy compilation stub for argc arguments.
-Handle<Code> ComputeLazyCompile(int argc);
-
class NoHandleAllocation BASE_EMBEDDED {
public:
#ifndef DEBUG
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index 92ded7b34..7668bbc15 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -111,10 +111,10 @@ int Clusterizer::CalculateNetworkSize(JSObject* obj) {
int size = obj->Size();
// If 'properties' and 'elements' are non-empty (thus, non-shared),
// take their size into account.
- if (FixedArray::cast(obj->properties())->length() != 0) {
+ if (obj->properties() != Heap::empty_fixed_array()) {
size += obj->properties()->Size();
}
- if (FixedArray::cast(obj->elements())->length() != 0) {
+ if (obj->elements() != Heap::empty_fixed_array()) {
size += obj->elements()->Size();
}
// For functions, also account non-empty context and literals sizes.
@@ -360,7 +360,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(String* name) {
HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name) {
- Heap::CollectAllGarbage(false);
+ Heap::CollectAllGarbage(true);
HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
HeapSnapshotGenerator generator(result);
generator.GenerateSnapshot();
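The emptiness checks above now compare identity against the canonical empty array rather than length: an object whose backing store is the shared singleton contributes nothing, while a distinct zero-length FixedArray still counts its header. Sketched:

    // Sketch: identity, not length, decides whether the store is shared.
    if (obj->properties() != Heap::empty_fixed_array()) {
      size += obj->properties()->Size();  // counted even if length == 0
    }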
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index c4d0439e0..1d696c7a1 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -37,6 +37,7 @@
#include "global-handles.h"
#include "mark-compact.h"
#include "natives.h"
+#include "objects-visiting.h"
#include "scanner.h"
#include "scopeinfo.h"
#include "snapshot.h"
@@ -1032,6 +1033,17 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
}
+class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
+ public:
+ static inline void VisitPointer(Object** p) {
+ Object* object = *p;
+ if (!Heap::InNewSpace(object)) return;
+ Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
+ reinterpret_cast<HeapObject*>(object));
+ }
+};
+
+
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
Address new_space_front) {
do {
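NewSpaceScavenger is a static (CRTP) visitor: StaticNewSpaceVisitor<T> invokes T::VisitPointer at compile time, avoiding ObjectVisitor's virtual dispatch on the scavenge hot path. A self-contained sketch of the pattern with illustrative names (not the V8 types):

    template<typename StaticVisitor>
    struct StaticVisitorSketch {
      static void VisitPointers(void** start, void** end) {
        // Resolved at compile time, so the callback can be inlined.
        for (void** p = start; p < end; ++p) StaticVisitor::VisitPointer(p);
      }
    };

    struct ClearingVisitor : StaticVisitorSketch<ClearingVisitor> {
      // Example policy: null out every slot it visits.
      static void VisitPointer(void** p) { *p = 0; }
    };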
@@ -1042,10 +1054,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// queue is empty.
while (new_space_front < new_space_.top()) {
HeapObject* object = HeapObject::FromAddress(new_space_front);
- Map* map = object->map();
- int size = object->SizeFromMap(map);
- object->IterateBody(map->instance_type(), size, scavenge_visitor);
- new_space_front += size;
+ new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
}
// Promote and process all the to-be-promoted objects.
@@ -1072,315 +1081,231 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
}
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
-static void RecordCopiedObject(HeapObject* obj) {
- bool should_record = false;
-#ifdef DEBUG
- should_record = FLAG_heap_stats;
-#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
- should_record = should_record || FLAG_log_gc;
-#endif
- if (should_record) {
- if (Heap::new_space()->Contains(obj)) {
- Heap::new_space()->RecordAllocation(obj);
- } else {
- Heap::new_space()->RecordPromotion(obj);
- }
- }
-}
-#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+class ScavengingVisitor : public StaticVisitorBase {
+ public:
+ static void Initialize() {
+ table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
+ table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
+ table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
+ table_.Register(kVisitByteArray, &EvacuateByteArray);
+ table_.Register(kVisitFixedArray, &EvacuateFixedArray);
+ typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject;
-// Helper function used by CopyObject to copy a source object to an
-// allocated target object and update the forwarding pointer in the source
-// object. Returns the target object.
-inline static HeapObject* MigrateObject(HeapObject* source,
- HeapObject* target,
- int size) {
- // Copy the content of source to target.
- Heap::CopyBlock(target->address(), source->address(), size);
+ table_.Register(kVisitConsString,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ VisitSpecialized<ConsString::kSize>);
- // Set the forwarding address.
- source->set_map_word(MapWord::FromForwardingAddress(target));
+ table_.Register(kVisitSharedFunctionInfo,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ VisitSpecialized<SharedFunctionInfo::kSize>);
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- // Update NewSpace stats if necessary.
- RecordCopiedObject(target);
-#endif
- HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
+ table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
+ kVisitDataObject,
+ kVisitDataObjectGeneric>();
- return target;
-}
+ table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
+ kVisitJSObject,
+ kVisitJSObjectGeneric>();
+ table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
+ kVisitStruct,
+ kVisitStructGeneric>();
+ }
-enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
-enum SizeRestriction { SMALL, UNKNOWN_SIZE };
+ static inline void Scavenge(Map* map, HeapObject** slot, HeapObject* obj) {
+ table_.GetVisitor(map)(map, slot, obj);
+ }
-template<ObjectContents object_contents, SizeRestriction size_restriction>
-static inline void EvacuateObject(Map* map,
- HeapObject** slot,
- HeapObject* object,
- int object_size) {
- ASSERT((size_restriction != SMALL) ||
- (object_size <= Page::kMaxHeapObjectSize));
- ASSERT(object->Size() == object_size);
- if (Heap::ShouldBePromoted(object->address(), object_size)) {
- Object* result;
+ private:
+ enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
+ enum SizeRestriction { SMALL, UNKNOWN_SIZE };
- if ((size_restriction != SMALL) &&
- (object_size > Page::kMaxHeapObjectSize)) {
- result = Heap::lo_space()->AllocateRawFixedArray(object_size);
- } else {
- if (object_contents == DATA_OBJECT) {
- result = Heap::old_data_space()->AllocateRaw(object_size);
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+ static void RecordCopiedObject(HeapObject* obj) {
+ bool should_record = false;
+#ifdef DEBUG
+ should_record = FLAG_heap_stats;
+#endif
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ should_record = should_record || FLAG_log_gc;
+#endif
+ if (should_record) {
+ if (Heap::new_space()->Contains(obj)) {
+ Heap::new_space()->RecordAllocation(obj);
} else {
- result = Heap::old_pointer_space()->AllocateRaw(object_size);
+ Heap::new_space()->RecordPromotion(obj);
}
}
-
- if (!result->IsFailure()) {
- HeapObject* target = HeapObject::cast(result);
- *slot = MigrateObject(object, target, object_size);
-
- if (object_contents == POINTER_OBJECT) {
- promotion_queue.insert(target, object_size);
- }
-
- Heap::tracer()->increment_promoted_objects_size(object_size);
- return;
- }
}
- Object* result = Heap::new_space()->AllocateRaw(object_size);
- ASSERT(!result->IsFailure());
- *slot = MigrateObject(object, HeapObject::cast(result), object_size);
- return;
-}
+#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+ // Helper function used by CopyObject to copy a source object to an
+ // allocated target object and update the forwarding pointer in the source
+ // object. Returns the target object.
+ INLINE(static HeapObject* MigrateObject(HeapObject* source,
+ HeapObject* target,
+ int size)) {
+ // Copy the content of source to target.
+ Heap::CopyBlock(target->address(), source->address(), size);
-template<int object_size_in_words, ObjectContents object_contents>
-static inline void EvacuateObjectOfFixedSize(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- const int object_size = object_size_in_words << kPointerSizeLog2;
- EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
-}
+ // Set the forwarding address.
+ source->set_map_word(MapWord::FromForwardingAddress(target));
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+ // Update NewSpace stats if necessary.
+ RecordCopiedObject(target);
+#endif
+ HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
-template<ObjectContents object_contents>
-static inline void EvacuateObjectOfFixedSize(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- int object_size = map->instance_size();
- EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
-}
+ return target;
+ }
-static inline void EvacuateFixedArray(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- int object_size = FixedArray::cast(object)->FixedArraySize();
- EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
-}
+ template<ObjectContents object_contents, SizeRestriction size_restriction>
+ static inline void EvacuateObject(Map* map,
+ HeapObject** slot,
+ HeapObject* object,
+ int object_size) {
+ ASSERT((size_restriction != SMALL) ||
+ (object_size <= Page::kMaxHeapObjectSize));
+ ASSERT(object->Size() == object_size);
+ if (Heap::ShouldBePromoted(object->address(), object_size)) {
+ Object* result;
-static inline void EvacuateByteArray(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- int object_size = ByteArray::cast(object)->ByteArraySize();
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
-}
+ if ((size_restriction != SMALL) &&
+ (object_size > Page::kMaxHeapObjectSize)) {
+ result = Heap::lo_space()->AllocateRawFixedArray(object_size);
+ } else {
+ if (object_contents == DATA_OBJECT) {
+ result = Heap::old_data_space()->AllocateRaw(object_size);
+ } else {
+ result = Heap::old_pointer_space()->AllocateRaw(object_size);
+ }
+ }
+ if (!result->IsFailure()) {
+ HeapObject* target = HeapObject::cast(result);
+ *slot = MigrateObject(object, target, object_size);
-static Scavenger GetScavengerForSize(int object_size,
- ObjectContents object_contents) {
- ASSERT(IsAligned(object_size, kPointerSize));
- ASSERT(object_size < Page::kMaxHeapObjectSize);
+ if (object_contents == POINTER_OBJECT) {
+ promotion_queue.insert(target, object_size);
+ }
- switch (object_size >> kPointerSizeLog2) {
-#define CASE(n) \
- case n: \
- if (object_contents == DATA_OBJECT) { \
- return static_cast<Scavenger>( \
- &EvacuateObjectOfFixedSize<n, DATA_OBJECT>); \
- } else { \
- return static_cast<Scavenger>( \
- &EvacuateObjectOfFixedSize<n, POINTER_OBJECT>); \
+ Heap::tracer()->increment_promoted_objects_size(object_size);
+ return;
}
+ }
+ Object* result = Heap::new_space()->AllocateRaw(object_size);
+ ASSERT(!result->IsFailure());
+ *slot = MigrateObject(object, HeapObject::cast(result), object_size);
+ return;
+ }
- CASE(1);
- CASE(2);
- CASE(3);
- CASE(4);
- CASE(5);
- CASE(6);
- CASE(7);
- CASE(8);
- CASE(9);
- CASE(10);
- CASE(11);
- CASE(12);
- CASE(13);
- CASE(14);
- CASE(15);
- CASE(16);
- default:
- if (object_contents == DATA_OBJECT) {
- return static_cast<Scavenger>(&EvacuateObjectOfFixedSize<DATA_OBJECT>);
- } else {
- return static_cast<Scavenger>(
- &EvacuateObjectOfFixedSize<POINTER_OBJECT>);
- }
-#undef CASE
+ static inline void EvacuateFixedArray(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
+ EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
+ slot,
+ object,
+ object_size);
}
-}
-static inline void EvacuateSeqAsciiString(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- int object_size = SeqAsciiString::cast(object)->
- SeqAsciiStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
-}
+ static inline void EvacuateByteArray(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
+ EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+ }
-static inline void EvacuateSeqTwoByteString(Map* map,
+ static inline void EvacuateSeqAsciiString(Map* map,
HeapObject** slot,
HeapObject* object) {
- int object_size = SeqTwoByteString::cast(object)->
- SeqTwoByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
-}
+ int object_size = SeqAsciiString::cast(object)->
+ SeqAsciiStringSize(map->instance_type());
+ EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+ }
-static inline bool IsShortcutCandidate(int type) {
- return ((type & kShortcutTypeMask) == kShortcutTypeTag);
-}
+ static inline void EvacuateSeqTwoByteString(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = SeqTwoByteString::cast(object)->
+ SeqTwoByteStringSize(map->instance_type());
+ EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+ }
-static inline void EvacuateShortcutCandidate(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- ASSERT(IsShortcutCandidate(map->instance_type()));
+ static inline bool IsShortcutCandidate(int type) {
+ return ((type & kShortcutTypeMask) == kShortcutTypeTag);
+ }
- if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) {
- HeapObject* first =
- HeapObject::cast(ConsString::cast(object)->unchecked_first());
+ static inline void EvacuateShortcutCandidate(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ ASSERT(IsShortcutCandidate(map->instance_type()));
- *slot = first;
+ if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) {
+ HeapObject* first =
+ HeapObject::cast(ConsString::cast(object)->unchecked_first());
- if (!Heap::InNewSpace(first)) {
- object->set_map_word(MapWord::FromForwardingAddress(first));
- return;
- }
+ *slot = first;
+
+ if (!Heap::InNewSpace(first)) {
+ object->set_map_word(MapWord::FromForwardingAddress(first));
+ return;
+ }
- MapWord first_word = first->map_word();
- if (first_word.IsForwardingAddress()) {
- HeapObject* target = first_word.ToForwardingAddress();
+ MapWord first_word = first->map_word();
+ if (first_word.IsForwardingAddress()) {
+ HeapObject* target = first_word.ToForwardingAddress();
+
+ *slot = target;
+ object->set_map_word(MapWord::FromForwardingAddress(target));
+ return;
+ }
- *slot = target;
- object->set_map_word(MapWord::FromForwardingAddress(target));
+ Scavenge(first->map(), slot, first);
+ object->set_map_word(MapWord::FromForwardingAddress(*slot));
return;
}
- first->map()->Scavenge(slot, first);
- object->set_map_word(MapWord::FromForwardingAddress(*slot));
- return;
+ int object_size = ConsString::kSize;
+ EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
}
- int object_size = ConsString::kSize;
- EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
-}
-
-
-Scavenger Heap::GetScavenger(int instance_type, int instance_size) {
- if (instance_type < FIRST_NONSTRING_TYPE) {
- switch (instance_type & kStringRepresentationMask) {
- case kSeqStringTag:
- if ((instance_type & kStringEncodingMask) == kAsciiStringTag) {
- return &EvacuateSeqAsciiString;
- } else {
- return &EvacuateSeqTwoByteString;
- }
-
- case kConsStringTag:
- if (IsShortcutCandidate(instance_type)) {
- return &EvacuateShortcutCandidate;
- } else {
- ASSERT(instance_size == ConsString::kSize);
- return GetScavengerForSize(ConsString::kSize, POINTER_OBJECT);
- }
-
- case kExternalStringTag:
- ASSERT(instance_size == ExternalString::kSize);
- return GetScavengerForSize(ExternalString::kSize, DATA_OBJECT);
+ template<ObjectContents object_contents>
+ class ObjectEvacuationStrategy {
+ public:
+ template<int object_size>
+ static inline void VisitSpecialized(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
}
- UNREACHABLE();
- }
- switch (instance_type) {
- case BYTE_ARRAY_TYPE:
- return reinterpret_cast<Scavenger>(&EvacuateByteArray);
-
- case FIXED_ARRAY_TYPE:
- return reinterpret_cast<Scavenger>(&EvacuateFixedArray);
-
- case JS_OBJECT_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_VALUE_TYPE:
- case JS_ARRAY_TYPE:
- case JS_REGEXP_TYPE:
- case JS_FUNCTION_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_BUILTINS_OBJECT_TYPE:
- return GetScavengerForSize(instance_size, POINTER_OBJECT);
-
- case ODDBALL_TYPE:
- return NULL;
-
- case PROXY_TYPE:
- return GetScavengerForSize(Proxy::kSize, DATA_OBJECT);
+ static inline void Visit(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = map->instance_size();
+ EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
+ }
+ };
- case MAP_TYPE:
- return NULL;
+ typedef void (*Callback)(Map* map, HeapObject** slot, HeapObject* object);
- case CODE_TYPE:
- return NULL;
+ static VisitorDispatchTable<Callback> table_;
+};
- case JS_GLOBAL_PROPERTY_CELL_TYPE:
- return NULL;
- case HEAP_NUMBER_TYPE:
- case FILLER_TYPE:
- case PIXEL_ARRAY_TYPE:
- case EXTERNAL_BYTE_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- case EXTERNAL_SHORT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- case EXTERNAL_INT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- return GetScavengerForSize(instance_size, DATA_OBJECT);
-
- case SHARED_FUNCTION_INFO_TYPE:
- return GetScavengerForSize(SharedFunctionInfo::kAlignedSize,
- POINTER_OBJECT);
-
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE:
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- return GetScavengerForSize(instance_size, POINTER_OBJECT);
- default:
- UNREACHABLE();
- return NULL;
- }
-}
+VisitorDispatchTable<ScavengingVisitor::Callback> ScavengingVisitor::table_;
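
With this change a map no longer stores a scavenger function pointer; it stores a small visitor id (set via set_visitor_id below) that indexes the static table_ of callbacks. A minimal sketch of such a dispatch table, with simplified hypothetical types rather than the real V8 declarations:

    #include <cassert>

    struct Map;
    struct HeapObject;

    // One callback per visitor id; ids are small integers derived from the
    // instance type and size (compare StaticVisitorBase::GetVisitorId below).
    enum VisitorId { kVisitDataObject, kVisitPointerObject, kVisitorIdCount };

    typedef void (*Callback)(Map* map, HeapObject** slot, HeapObject* object);

    template <typename C>
    class VisitorDispatchTable {
     public:
      void Register(VisitorId id, C callback) { callbacks_[id] = callback; }
      C GetVisitor(VisitorId id) const { return callbacks_[id]; }
     private:
      C callbacks_[kVisitorIdCount];
    };

    static void EvacuateData(Map*, HeapObject**, HeapObject*) {}

    int main() {
      VisitorDispatchTable<Callback> table;
      table.Register(kVisitDataObject, &EvacuateData);
      assert(table.GetVisitor(kVisitDataObject) == &EvacuateData);
    }

One table shared by all maps replaces one pointer per map, which shrinks maps and keeps the dispatch targets in a single, cache-friendly structure.
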
void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
@@ -1388,7 +1313,7 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
MapWord first_word = object->map_word();
ASSERT(!first_word.IsForwardingAddress());
Map* map = first_word.ToMap();
- map->Scavenge(p, object);
+ ScavengingVisitor::Scavenge(map, p, object);
}
@@ -1407,7 +1332,8 @@ Object* Heap::AllocatePartialMap(InstanceType instance_type,
reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
reinterpret_cast<Map*>(result)->
- set_scavenger(GetScavenger(instance_type, instance_size));
+ set_visitor_id(
+ StaticVisitorBase::GetVisitorId(instance_type, instance_size));
reinterpret_cast<Map*>(result)->set_inobject_properties(0);
reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
@@ -1424,7 +1350,8 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
Map* map = reinterpret_cast<Map*>(result);
map->set_map(meta_map());
map->set_instance_type(instance_type);
- map->set_scavenger(GetScavenger(instance_type, instance_size));
+ map->set_visitor_id(
+ StaticVisitorBase::GetVisitorId(instance_type, instance_size));
map->set_prototype(null_value());
map->set_constructor(null_value());
map->set_instance_size(instance_size);
@@ -2452,39 +2379,61 @@ class FlushingStackVisitor : public ThreadVisitor {
};
-static void FlushCodeForFunction(SharedFunctionInfo* function_info) {
+static bool CodeIsActive(Code* code) {
+ // Make sure we are not referencing the code from the stack.
+ for (StackFrameIterator it; !it.done(); it.Advance()) {
+ if (code->contains(it.frame()->pc())) return true;
+ }
+ // Iterate the archived stacks in all threads to check if
+ // the code is referenced.
+ FlushingStackVisitor threadvisitor(code);
+ ThreadManager::IterateArchivedThreads(&threadvisitor);
+ if (threadvisitor.FoundCode()) return true;
+ return false;
+}
+
+
+static void FlushCodeForFunction(JSFunction* function) {
+ SharedFunctionInfo* shared_info = function->shared();
+
+ // Special handling if the function and shared info objects
+ // have different code objects.
+ if (function->code() != shared_info->code()) {
+ // If the shared function has been flushed but the function has not,
+ // we flush the function if possible.
+ if (!shared_info->is_compiled() && function->is_compiled() &&
+ !CodeIsActive(function->code())) {
+ function->set_code(shared_info->code());
+ }
+ return;
+ }
+
// The function must be compiled and have the source code available,
// to be able to recompile it in case we need the function again.
- if (!(function_info->is_compiled() && function_info->HasSourceCode())) return;
+ if (!(shared_info->is_compiled() && shared_info->HasSourceCode())) return;
// We never flush code for Api functions.
- if (function_info->IsApiFunction()) return;
+ if (shared_info->IsApiFunction()) return;
// Only flush code for functions.
- if (!function_info->code()->kind() == Code::FUNCTION) return;
+ if (shared_info->code()->kind() != Code::FUNCTION) return;
// Function must be lazy compilable.
- if (!function_info->allows_lazy_compilation()) return;
+ if (!shared_info->allows_lazy_compilation()) return;
// If this is a full script wrapped in a function we do not flush the code.
- if (function_info->is_toplevel()) return;
+ if (shared_info->is_toplevel()) return;
// If this function is in the compilation cache we do not flush the code.
- if (CompilationCache::HasFunction(function_info)) return;
+ if (CompilationCache::HasFunction(shared_info)) return;
- // Make sure we are not referencing the code from the stack.
- for (StackFrameIterator it; !it.done(); it.Advance()) {
- if (function_info->code()->contains(it.frame()->pc())) return;
- }
- // Iterate the archived stacks in all threads to check if
- // the code is referenced.
- FlushingStackVisitor threadvisitor(function_info->code());
- ThreadManager::IterateArchivedThreads(&threadvisitor);
- if (threadvisitor.FoundCode()) return;
+ // Check stack and archived threads for the code.
+ if (CodeIsActive(shared_info->code())) return;
// Compute the lazy compilable version of the code.
- HandleScope scope;
- function_info->set_code(*ComputeLazyCompile(function_info->length()));
+ Code* code = Builtins::builtin(Builtins::LazyCompile);
+ shared_info->set_code(code);
+ function->set_code(code);
}
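
The net effect of a flush is that both code pointers, the function's and the shared info's, end up at the LazyCompile builtin, so the next invocation re-enters the compiler; the early-return branch above repairs the half-flushed state where only the shared info was flushed. A toy model of the two-pointer invariant (stand-in types, not the real V8 classes):

    #include <cassert>

    struct Code { bool is_lazy_compile; };
    static Code lazy_compile = { true };
    static Code compiled = { false };

    struct SharedFunctionInfo { Code* code; };
    struct JSFunction { SharedFunctionInfo* shared; Code* code; };

    // Mirrors the flush above: both pointers move to the LazyCompile builtin.
    void Flush(JSFunction* f) {
      f->shared->code = &lazy_compile;
      f->code = &lazy_compile;
    }

    int main() {
      SharedFunctionInfo s = { &compiled };
      JSFunction f = { &s, s.code };
      Flush(&f);
      assert(f.code->is_lazy_compile && f.shared->code->is_lazy_compile);
    }
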
@@ -2496,12 +2445,12 @@ void Heap::FlushCode() {
HeapObjectIterator it(old_pointer_space());
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
if (obj->IsJSFunction()) {
- JSFunction* jsfunction = JSFunction::cast(obj);
+ JSFunction* function = JSFunction::cast(obj);
// The function must have a valid context and not be a builtin.
- if (jsfunction->unchecked_context()->IsContext() &&
- !jsfunction->IsBuiltin()) {
- FlushCodeForFunction(jsfunction->shared());
+ if (function->unchecked_context()->IsContext() &&
+ !function->IsBuiltin()) {
+ FlushCodeForFunction(function);
}
}
}
@@ -2651,6 +2600,7 @@ Object* Heap::InitializeFunction(JSFunction* function,
function->initialize_properties();
function->initialize_elements();
function->set_shared(shared);
+ function->set_code(shared->code());
function->set_prototype_or_initial_map(prototype);
function->set_context(undefined_value());
function->set_literals(empty_fixed_array());
@@ -4000,7 +3950,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
v->Synchronize("strong_root_list");
- v->VisitPointer(BitCast<Object**, String**>(&hidden_symbol_));
+ v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
v->Synchronize("symbol");
Bootstrapper::Iterate(v);
@@ -4126,6 +4076,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->memory_allocator_size = MemoryAllocator::Size();
*stats->memory_allocator_capacity =
MemoryAllocator::Size() + MemoryAllocator::Available();
+ *stats->os_error = OS::GetLastError();
if (take_snapshot) {
HeapIterator iterator;
for (HeapObject* obj = iterator.next();
@@ -4174,6 +4125,10 @@ bool Heap::Setup(bool create_heap_objects) {
if (!ConfigureHeapDefault()) return false;
}
+ ScavengingVisitor::Initialize();
+ NewSpaceScavenger::Initialize();
+ MarkCompactCollector::Initialize();
+
// Setup memory allocator and reserve a chunk of memory for new
// space. The chunk is double the size of the requested reserved
// new space size to ensure that we can find a pair of semispaces that
@@ -4858,6 +4813,7 @@ GCTracer::~GCTracer() {
PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
+ PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
PrintF("flushcode=%d ", static_cast<int>(scopes_[Scope::MC_FLUSH_CODE]));
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index a0b2157aa..93b90b184 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -983,8 +983,6 @@ class Heap : public AllStatic {
static void RecordStats(HeapStats* stats, bool take_snapshot = false);
- static Scavenger GetScavenger(int instance_type, int instance_size);
-
// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
@@ -1347,7 +1345,8 @@ class HeapStats {
int* memory_allocator_capacity; // 20
int* objects_per_type; // 21
int* size_per_type; // 22
- int* end_marker; // 23
+ int* os_error; // 23
+ int* end_marker; // 24
};
@@ -1725,6 +1724,7 @@ class GCTracer BASE_EMBEDDED {
EXTERNAL,
MC_MARK,
MC_SWEEP,
+ MC_SWEEP_NEWSPACE,
MC_COMPACT,
MC_FLUSH_CODE,
kNumberOfScopes
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 7fa151e9e..ecbdfdcff 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -183,6 +183,30 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
}
+template<typename StaticVisitor>
+void RelocInfo::Visit() {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitPointer(target_object_address());
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (Debug::has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(this);
+#endif
+ } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
+}
+
+
+
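
Unlike the ObjectVisitor overload above it, this Visit is parameterized by a class whose hooks are static member functions, so dispatch happens at compile time and the calls can inline. A minimal illustration of the static-visitor idiom with a hypothetical visitor:

    #include <cstdio>

    // Compile-time (static) visitor: all hooks are static member functions,
    // so there is no vtable indirection at the call sites.
    struct CountingVisitor {
      static int pointers;
      static void VisitPointer(void** /*address*/) { ++pointers; }
    };
    int CountingVisitor::pointers = 0;

    template <typename StaticVisitor>
    void VisitSlots(void** slots, int count) {
      for (int i = 0; i < count; i++) StaticVisitor::VisitPointer(&slots[i]);
    }

    int main() {
      void* slots[3] = {nullptr, nullptr, nullptr};
      VisitSlots<CountingVisitor>(slots, 3);
      std::printf("%d\n", CountingVisitor::pointers);  // prints 3
    }
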
Immediate::Immediate(int x) {
x_ = x;
rmode_ = RelocInfo::NONE;
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 6c830cba1..2565acb53 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -1142,6 +1142,21 @@ void Assembler::rcl(Register dst, uint8_t imm8) {
}
+void Assembler::rcr(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xD8 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xD8 | dst.code());
+ EMIT(imm8);
+ }
+}
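
The two emit paths follow the standard IA-32 group-2 shift/rotate encoding: opcode 0xD1 for a count of one, 0xC1 plus an immediate byte otherwise, with ModRM byte 0xD8 | reg selecting the /3 (RCR) form. A stand-alone restatement of the emit logic, useful only to show the bytes produced (register codes assumed 0-7 as on ia32):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Hypothetical re-implementation of the rcr() emit logic above.
    std::vector<uint8_t> EncodeRcr(int reg_code, uint8_t imm8) {
      assert(imm8 < 32);  // is_uint5
      if (imm8 == 1) return {0xD1, static_cast<uint8_t>(0xD8 | reg_code)};
      return {0xC1, static_cast<uint8_t>(0xD8 | reg_code), imm8};
    }

    int main() {
      // rcr eax, 1  ->  D1 D8       (eax has register code 0)
      assert((EncodeRcr(0, 1) == std::vector<uint8_t>{0xD1, 0xD8}));
      // rcr ecx, 4  ->  C1 D9 04    (ecx has register code 1)
      assert((EncodeRcr(1, 4) == std::vector<uint8_t>{0xC1, 0xD9, 0x04}));
    }
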
+
+
void Assembler::sar(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index c76c55cf5..8a5a4c5f5 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -625,6 +625,7 @@ class Assembler : public Malloced {
void or_(const Operand& dst, const Immediate& x);
void rcl(Register dst, uint8_t imm8);
+ void rcr(Register dst, uint8_t imm8);
void sar(Register dst, uint8_t imm8);
void sar_cl(Register dst);
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 3adb014b1..35a90a4ac 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -429,6 +429,26 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
+void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Push a copy of the function onto the stack.
+ __ push(edi);
+
+ __ push(edi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ __ pop(edi);
+
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+
+ // Do a tail-call of the compiled function.
+ __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
+ __ jmp(Operand(ecx));
+}
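
The tail call at the end computes the code entry point from the tagged Code pointer: FieldOperand adds the field offset and compensates for the heap-object tag, and lea materializes the resulting address. A minimal model of that address arithmetic (kHeapObjectTag is 1; the header size here is illustrative, not the real Code::kHeaderSize):

    #include <cassert>
    #include <cstdint>

    const intptr_t kHeapObjectTag = 1;   // standard V8 heap-object tag
    const intptr_t kHeaderSize = 32;     // illustrative value only

    // FieldOperand(obj, offset) addresses [obj + offset - kHeapObjectTag].
    intptr_t FieldAddress(intptr_t tagged_ptr, intptr_t offset) {
      return tagged_ptr + offset - kHeapObjectTag;
    }

    int main() {
      intptr_t code_object = 0x1000 + kHeapObjectTag;  // tagged pointer
      assert(FieldAddress(code_object, kHeaderSize) == 0x1000 + kHeaderSize);
    }
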
+
+
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
{ Label done;
@@ -548,7 +568,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(ebx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ SmiUntag(ebx);
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+ __ mov(edx, FieldOperand(edi, JSFunction::kCodeOffset));
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
__ cmp(eax, Operand(ebx));
__ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline)));
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index ba7785b2d..cc89cc7db 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -202,105 +202,92 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// esi: callee's context
allocator_->Initialize();
- if (info->mode() == CompilationInfo::PRIMARY) {
- frame_->Enter();
-
- // Allocate space for locals and initialize them.
- frame_->AllocateStackSlots();
-
- // Allocate the local context if needed.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ allocate local context");
- // Allocate local context.
- // Get outer context and create a new context based on it.
- frame_->PushFunction();
- Result context;
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- context = frame_->CallStub(&stub, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kNewContext, 1);
- }
+ frame_->Enter();
+
+ // Allocate space for locals and initialize them.
+ frame_->AllocateStackSlots();
+
+ // Allocate the local context if needed.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment cmnt(masm_, "[ allocate local context");
+ // Allocate local context.
+ // Get outer context and create a new context based on it.
+ frame_->PushFunction();
+ Result context;
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ context = frame_->CallStub(&stub, 1);
+ } else {
+ context = frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
- // Update context local.
- frame_->SaveContextRegister();
+ // Update context local.
+ frame_->SaveContextRegister();
- // Verify that the runtime call result and esi agree.
- if (FLAG_debug_code) {
- __ cmp(context.reg(), Operand(esi));
- __ Assert(equal, "Runtime::NewContext should end up in esi");
- }
+ // Verify that the runtime call result and esi agree.
+ if (FLAG_debug_code) {
+ __ cmp(context.reg(), Operand(esi));
+ __ Assert(equal, "Runtime::NewContext should end up in esi");
}
+ }
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope()->num_parameters(); i++) {
- Variable* par = scope()->parameter(i);
- Slot* slot = par->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- // The use of SlotOperand below is safe in unspilled code
- // because the slot is guaranteed to be a context slot.
- //
- // There are no parameters in the global scope.
- ASSERT(!scope()->is_global_scope());
- frame_->PushParameterAt(i);
- Result value = frame_->Pop();
- value.ToRegister();
-
- // SlotOperand loads context.reg() with the context object
- // stored to, used below in RecordWrite.
- Result context = allocator_->Allocate();
- ASSERT(context.is_valid());
- __ mov(SlotOperand(slot, context.reg()), value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- frame_->Spill(context.reg());
- frame_->Spill(value.reg());
- __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
- }
+ // TODO(1241774): Improve this code:
+ // 1) only needed if we have a context
+ // 2) no need to recompute context ptr every single time
+ // 3) don't copy parameter operand code from SlotOperand!
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, it must be the last argument
+ // passed to the parameter that needs to be copied. This is a rare
+ // case so we don't check for it, instead we rely on the copying
+ // order: such a parameter is copied repeatedly into the same
+ // context location and thus the last value is what is seen inside
+ // the function.
+ for (int i = 0; i < scope()->num_parameters(); i++) {
+ Variable* par = scope()->parameter(i);
+ Slot* slot = par->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ // The use of SlotOperand below is safe in unspilled code
+ // because the slot is guaranteed to be a context slot.
+ //
+ // There are no parameters in the global scope.
+ ASSERT(!scope()->is_global_scope());
+ frame_->PushParameterAt(i);
+ Result value = frame_->Pop();
+ value.ToRegister();
+
+ // SlotOperand loads context.reg() with the context object
+ // stored to, used below in RecordWrite.
+ Result context = allocator_->Allocate();
+ ASSERT(context.is_valid());
+ __ mov(SlotOperand(slot, context.reg()), value.reg());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ frame_->Spill(context.reg());
+ frame_->Spill(value.reg());
+ __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
}
}
+ }
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
-
- // Initialize ThisFunction reference if present.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- frame_->Push(Factory::the_hole_value());
- StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
- }
- } else {
- // When used as the secondary compiler for splitting, ebp, esi,
- // and edi have been pushed on the stack. Adjust the virtual
- // frame to match this state.
- frame_->Adjust(3);
- allocator_->Unuse(edi);
+ // Store the arguments object. This must happen after context
+ // initialization because the arguments object may be stored in
+ // the context.
+ if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
+ StoreArgumentsObject(true);
+ }
- // Bind all the bailout labels to the beginning of the function.
- List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
- for (int i = 0; i < bailouts->length(); i++) {
- __ bind(bailouts->at(i)->label());
- }
+ // Initialize ThisFunction reference if present.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ frame_->Push(Factory::the_hole_value());
+ StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
}
+
// Initialize the function return target after the locals are set
// up, because it needs the expected frame height from the frame.
function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
@@ -1038,7 +1025,11 @@ const char* GenericBinaryOpStub::GetName() {
}
-// Call the specialized stub for a binary operation.
+// Perform or call the specialized stub for a binary operation. Requires the
+// three registers left, right and dst to be distinct and spilled. This
+// deferred operation has up to three entry points: The main one calls the
+// runtime system. The second is for when the result is a non-Smi. The
+// third is for when at least one of the inputs is non-Smi and we have SSE2.
class DeferredInlineBinaryOperation: public DeferredCode {
public:
DeferredInlineBinaryOperation(Token::Value op,
@@ -1051,11 +1042,23 @@ class DeferredInlineBinaryOperation: public DeferredCode {
: op_(op), dst_(dst), left_(left), right_(right),
left_info_(left_info), right_info_(right_info), mode_(mode) {
set_comment("[ DeferredInlineBinaryOperation");
+ ASSERT(!left.is(right));
}
virtual void Generate();
+ // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
+ // Exit().
+ virtual bool AutoSaveAndRestore() { return false; }
+
+ void JumpToAnswerOutOfRange(Condition cond);
+ void JumpToConstantRhs(Condition cond, Smi* smi_value);
+ Label* NonSmiInputLabel();
+
private:
+ void GenerateAnswerOutOfRange();
+ void GenerateNonSmiInput();
+
Token::Value op_;
Register dst_;
Register left_;
@@ -1063,15 +1066,42 @@ class DeferredInlineBinaryOperation: public DeferredCode {
TypeInfo left_info_;
TypeInfo right_info_;
OverwriteMode mode_;
+ Label answer_out_of_range_;
+ Label non_smi_input_;
+ Label constant_rhs_;
+ Smi* smi_value_;
};
+Label* DeferredInlineBinaryOperation::NonSmiInputLabel() {
+ if (Token::IsBitOp(op_) && CpuFeatures::IsSupported(SSE2)) {
+ return &non_smi_input_;
+ } else {
+ return entry_label();
+ }
+}
+
+
+void DeferredInlineBinaryOperation::JumpToAnswerOutOfRange(Condition cond) {
+ __ j(cond, &answer_out_of_range_);
+}
+
+
+void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond,
+ Smi* smi_value) {
+ smi_value_ = smi_value;
+ __ j(cond, &constant_rhs_);
+}
+
+
void DeferredInlineBinaryOperation::Generate() {
- Label done;
- if (CpuFeatures::IsSupported(SSE2) && ((op_ == Token::ADD) ||
- (op_ ==Token::SUB) ||
- (op_ == Token::MUL) ||
- (op_ == Token::DIV))) {
+ // Registers are not saved implicitly for this stub, so we should not
+ // tread on the registers that were not passed to us.
+ if (CpuFeatures::IsSupported(SSE2) &&
+ ((op_ == Token::ADD) ||
+ (op_ == Token::SUB) ||
+ (op_ == Token::MUL) ||
+ (op_ == Token::DIV))) {
CpuFeatures::Scope use_sse2(SSE2);
Label call_runtime, after_alloc_failure;
Label left_smi, right_smi, load_right, do_op;
@@ -1131,7 +1161,6 @@ void DeferredInlineBinaryOperation::Generate() {
__ cvtsi2sd(xmm1, Operand(right_));
__ SmiTag(right_);
if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
- Label alloc_failure;
__ push(left_);
__ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
__ pop(left_);
@@ -1146,19 +1175,200 @@ void DeferredInlineBinaryOperation::Generate() {
default: UNREACHABLE();
}
__ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
- __ jmp(&done);
+ Exit();
+
__ bind(&after_alloc_failure);
__ pop(left_);
__ bind(&call_runtime);
}
+ // Register spilling is not done implicitly for this stub.
+ // We cannot postpone it any longer now, though.
+ SaveRegisters();
+
GenericBinaryOpStub stub(op_,
mode_,
NO_SMI_CODE_IN_STUB,
TypeInfo::Combine(left_info_, right_info_));
stub.GenerateCall(masm_, left_, right_);
if (!dst_.is(eax)) __ mov(dst_, eax);
- __ bind(&done);
+ RestoreRegisters();
+ Exit();
+
+ if (non_smi_input_.is_linked() || constant_rhs_.is_linked()) {
+ GenerateNonSmiInput();
+ }
+ if (answer_out_of_range_.is_linked()) {
+ GenerateAnswerOutOfRange();
+ }
+}
+
+
+void DeferredInlineBinaryOperation::GenerateNonSmiInput() {
+ // We know at least one of the inputs was not a Smi.
+ // This is a third entry point into the deferred code.
+ // We may not overwrite left_ because the code that handles a
+ // non-smi answer may need to overwrite the heap number in left_.
+ ASSERT(!right_.is(dst_));
+ ASSERT(!left_.is(dst_));
+ ASSERT(!left_.is(right_));
+ // This entry point is used for bit ops where the right hand side
+ // is a constant Smi and the left hand side is a heap object. It
+ // is also used for bit ops where both sides are unknown, but where
+ // at least one of them is a heap object.
+ bool rhs_is_constant = constant_rhs_.is_linked();
+ // We can't generate code for both cases.
+ ASSERT(!non_smi_input_.is_linked() || !constant_rhs_.is_linked());
+
+ if (FLAG_debug_code) {
+ __ int3(); // We don't fall through into this code.
+ }
+
+ __ bind(&non_smi_input_);
+
+ if (rhs_is_constant) {
+ __ bind(&constant_rhs_);
+ // In this case the input is a heap object and it is in the dst_ register.
+ // The left_ and right_ registers have not been initialized yet.
+ __ mov(right_, Immediate(smi_value_));
+ __ mov(left_, Operand(dst_));
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ __ jmp(entry_label());
+ return;
+ } else {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ JumpIfNotNumber(dst_, left_info_, entry_label());
+ __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
+ __ SmiUntag(right_);
+ }
+ } else {
+ // We know we have SSE2 here because otherwise the label is not linked (see
+ // NonSmiInputLabel).
+ CpuFeatures::Scope use_sse2(SSE2);
+ // Handle the non-constant right hand side situation:
+ if (left_info_.IsSmi()) {
+ // Right is a heap object.
+ __ JumpIfNotNumber(right_, right_info_, entry_label());
+ __ ConvertToInt32(right_, right_, dst_, right_info_, entry_label());
+ __ mov(dst_, Operand(left_));
+ __ SmiUntag(dst_);
+ } else if (right_info_.IsSmi()) {
+ // Left is a heap object.
+ __ JumpIfNotNumber(left_, left_info_, entry_label());
+ __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
+ __ SmiUntag(right_);
+ } else {
+ // Here we don't know if it's one or both that is a heap object.
+ Label only_right_is_heap_object, got_both;
+ __ mov(dst_, Operand(left_));
+ __ SmiUntag(dst_, &only_right_is_heap_object);
+ // Left was a heap object.
+ __ JumpIfNotNumber(left_, left_info_, entry_label());
+ __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
+ __ SmiUntag(right_, &got_both);
+ // Both were heap objects.
+ __ rcl(right_, 1); // Put tag back.
+ __ JumpIfNotNumber(right_, right_info_, entry_label());
+ __ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label());
+ __ jmp(&got_both);
+ __ bind(&only_right_is_heap_object);
+ __ JumpIfNotNumber(right_, right_info_, entry_label());
+ __ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label());
+ __ bind(&got_both);
+ }
+ }
+ ASSERT(op_ == Token::BIT_AND ||
+ op_ == Token::BIT_OR ||
+ op_ == Token::BIT_XOR ||
+ right_.is(ecx));
+ switch (op_) {
+ case Token::BIT_AND: __ and_(dst_, Operand(right_)); break;
+ case Token::BIT_OR: __ or_(dst_, Operand(right_)); break;
+ case Token::BIT_XOR: __ xor_(dst_, Operand(right_)); break;
+ case Token::SHR: __ shr_cl(dst_); break;
+ case Token::SAR: __ sar_cl(dst_); break;
+ case Token::SHL: __ shl_cl(dst_); break;
+ default: UNREACHABLE();
+ }
+ if (op_ == Token::SHR) {
+ // Check that the *unsigned* result fits in a smi. Neither of
+ // the two high-order bits can be set:
+ // * 0x80000000: high bit would be lost when smi tagging.
+ // * 0x40000000: this number would convert to negative when smi
+ // tagging.
+ __ test(dst_, Immediate(0xc0000000));
+ __ j(not_zero, &answer_out_of_range_);
+ } else {
+ // Check that the *signed* result fits in a smi.
+ __ cmp(dst_, 0xc0000000);
+ __ j(negative, &answer_out_of_range_);
+ }
+ __ SmiTag(dst_);
+ Exit();
+}
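
Both range checks encode the same fact: a 31-bit smi payload holds exactly the values in [-2^30, 2^30), so an unsigned SHR result fits only if neither of the two top bits is set, while for a signed result the cmp against 0xc0000000 followed by j(negative) tests the same window. A self-contained check of that equivalence, assuming 32-bit ints and the one-bit smi tag:

    #include <cassert>
    #include <cstdint>

    // cmp v, 0xc0000000; j(negative, out_of_range) is equivalent to testing
    // the sign bit of v + 2^30.
    bool FitsSmiSigned(int32_t v) {
      return ((static_cast<uint32_t>(v) + 0x40000000u) & 0x80000000u) == 0;
    }

    // test v, 0xc0000000: neither high-order bit may be set.
    bool FitsSmiUnsigned(uint32_t v) {
      return (v & 0xc0000000u) == 0;
    }

    int main() {
      assert(FitsSmiSigned(0x3fffffff) && FitsSmiSigned(-0x40000000));
      assert(!FitsSmiSigned(0x40000000) && !FitsSmiSigned(-0x40000001));
      assert(FitsSmiUnsigned(0x3fffffffu) && !FitsSmiUnsigned(0x40000000u));
    }
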
+
+
+void DeferredInlineBinaryOperation::GenerateAnswerOutOfRange() {
+ Label after_alloc_failure2;
+ Label allocation_ok;
+ __ bind(&after_alloc_failure2);
+ // We have to allocate a number, causing a GC, while keeping hold of
+ // the answer in dst_. The answer is not a Smi. We can't just call the
+ // runtime shift function here because we already threw away the inputs.
+ __ xor_(left_, Operand(left_));
+ __ shl(dst_, 1); // Put top bit in carry flag and Smi tag the low bits.
+ __ rcr(left_, 1); // Rotate with carry.
+ __ push(dst_); // Smi tagged low 31 bits.
+ __ push(left_); // 0 or 0x80000000, which is Smi tagged in both cases.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ if (!left_.is(eax)) {
+ __ mov(left_, eax);
+ }
+ __ pop(right_); // High bit.
+ __ pop(dst_); // Low 31 bits.
+ __ shr(dst_, 1); // Put 0 in top bit.
+ __ or_(dst_, Operand(right_));
+ __ jmp(&allocation_ok);
+
+ // This is the second entry point to the deferred code. It is used only by
+ // the bit operations.
+ // The dst_ register has the answer. It is not Smi tagged. If mode_ is
+ // OVERWRITE_LEFT then left_ must contain either an overwritable heap number
+ // or a Smi.
+ // Put a heap number pointer in left_.
+ __ bind(&answer_out_of_range_);
+ SaveRegisters();
+ if (mode_ == OVERWRITE_LEFT) {
+ __ test(left_, Immediate(kSmiTagMask));
+ __ j(not_zero, &allocation_ok);
+ }
+ // This trashes right_.
+ __ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2);
+ __ bind(&allocation_ok);
+ if (CpuFeatures::IsSupported(SSE2) && op_ != Token::SHR) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ ASSERT(Token::IsBitOp(op_));
+ // Signed conversion.
+ __ cvtsi2sd(xmm0, Operand(dst_));
+ __ movdbl(FieldOperand(left_, HeapNumber::kValueOffset), xmm0);
+ } else {
+ if (op_ == Token::SHR) {
+ __ push(Immediate(0)); // High word of unsigned value.
+ __ push(dst_);
+ __ fild_d(Operand(esp, 0));
+ __ Drop(2);
+ } else {
+ ASSERT(Token::IsBitOp(op_));
+ __ push(dst_);
+ __ fild_s(Operand(esp, 0)); // Signed conversion.
+ __ pop(dst_);
+ }
+ __ fstp_d(FieldOperand(left_, HeapNumber::kValueOffset));
+ }
+ __ mov(dst_, left_);
+ RestoreRegisters();
+ Exit();
}
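
The shl/rcr pair splits the raw 32-bit answer into two values that are both valid smi bit patterns, so they can live on the stack across the GC-triggering Runtime::kNumberAlloc call: the smi-tagged low 31 bits, plus either 0 or 0x80000000 for the displaced top bit. A round-trip model of the split, assuming 32-bit arithmetic:

    #include <cassert>
    #include <cstdint>

    // Split: shl dst, 1 smi-tags the low 31 bits and moves the top bit into
    // the carry flag; rcr left, 1 rotates that carry into a zeroed register,
    // giving 0 or 0x80000000 (both have a clear low bit, i.e. are smis).
    void Split(uint32_t v, uint32_t* low_smi, uint32_t* high_bit) {
      *low_smi = v << 1;
      *high_bit = v & 0x80000000u;
    }

    // Rejoin: shr dst, 1 untags and clears the top bit; or-ing restores it.
    uint32_t Join(uint32_t low_smi, uint32_t high_bit) {
      return (low_smi >> 1) | high_bit;
    }

    int main() {
      for (uint32_t v : {0u, 1u, 0x7fffffffu, 0x80000000u, 0xdeadbeefu}) {
        uint32_t low, high;
        Split(v, &low, &high);
        assert((low & 1u) == 0 && (high & 1u) == 0);  // both smi-tagged
        assert(Join(low, high) == v);
      }
    }
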
@@ -1499,10 +1709,25 @@ void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
TypeInfo left_info,
TypeInfo right_info,
DeferredCode* deferred) {
+ JumpIfNotBothSmiUsingTypeInfo(left,
+ right,
+ scratch,
+ left_info,
+ right_info,
+ deferred->entry_label());
+}
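
The combined test below relies on the smi tag being 0 in the low bit: or-ing the two words together sets the tag bit iff at least one operand is a heap object, so a single test covers both. A small model of this, assuming kSmiTagMask == 1 as on ia32:

    #include <cassert>
    #include <cstdint>

    const uint32_t kSmiTagMask = 1;  // smis have a 0 tag, heap objects a 1

    bool BothSmi(uint32_t left, uint32_t right) {
      // mov scratch, left; or scratch, right; test scratch, kSmiTagMask
      return ((left | right) & kSmiTagMask) == 0;
    }

    int main() {
      uint32_t smi_a = 42u << 1, smi_b = 7u << 1;
      uint32_t heap = 0x1001;  // tagged heap-object pointer (low bit set)
      assert(BothSmi(smi_a, smi_b));
      assert(!BothSmi(smi_a, heap) && !BothSmi(heap, smi_b));
    }
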
+
+
+void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
+ Register right,
+ Register scratch,
+ TypeInfo left_info,
+ TypeInfo right_info,
+ Label* on_not_smi) {
if (left.is(right)) {
if (!left_info.IsSmi()) {
__ test(left, Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ __ j(not_zero, on_not_smi);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(left);
}
@@ -1511,17 +1736,17 @@ void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
__ mov(scratch, left);
__ or_(scratch, Operand(right));
__ test(scratch, Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ __ j(not_zero, on_not_smi);
} else {
__ test(left, Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ __ j(not_zero, on_not_smi);
if (FLAG_debug_code) __ AbortIfNotSmi(right);
}
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(left);
if (!right_info.IsSmi()) {
__ test(right, Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ __ j(not_zero, on_not_smi);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(right);
}
@@ -1606,13 +1831,16 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
right->ToRegister();
frame_->Spill(eax);
frame_->Spill(edx);
+ // DeferredInlineBinaryOperation requires all the registers that it is
+ // told about to be spilled and distinct.
+ Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
// Check that left and right are smi tagged.
DeferredInlineBinaryOperation* deferred =
new DeferredInlineBinaryOperation(op,
(op == Token::DIV) ? eax : edx,
left->reg(),
- right->reg(),
+ distinct_right.reg(),
left_type_info,
right_type_info,
overwrite_mode);
@@ -1695,15 +1923,24 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
left->ToRegister();
ASSERT(left->is_register() && !left->reg().is(ecx));
ASSERT(right->is_register() && right->reg().is(ecx));
+ if (left_type_info.IsSmi()) {
+ if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
+ }
+ if (right_type_info.IsSmi()) {
+ if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
+ }
// We will modify right, it must be spilled.
frame_->Spill(ecx);
+ // DeferredInlineBinaryOperation requires all the registers that it is told
+ // about to be spilled and distinct. We know that right is ecx and left is
+ // not ecx.
+ frame_->Spill(left->reg());
// Use a fresh answer register to avoid spilling the left operand.
answer = allocator_->Allocate();
ASSERT(answer.is_valid());
- // Check that both operands are smis using the answer register as a
- // temporary.
+
DeferredInlineBinaryOperation* deferred =
new DeferredInlineBinaryOperation(op,
answer.reg(),
@@ -1712,55 +1949,28 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
left_type_info,
right_type_info,
overwrite_mode);
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
+ left_type_info, right_type_info,
+ deferred->NonSmiInputLabel());
- Label do_op, left_nonsmi;
- // If right is a smi we make a fast case if left is either a smi
- // or a heapnumber.
- if (CpuFeatures::IsSupported(SSE2) && right_type_info.IsSmi()) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ mov(answer.reg(), left->reg());
- // Fast case - both are actually smis.
- if (!left_type_info.IsSmi()) {
- __ test(answer.reg(), Immediate(kSmiTagMask));
- __ j(not_zero, &left_nonsmi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
- }
- if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
- __ SmiUntag(answer.reg());
- __ jmp(&do_op);
-
- __ bind(&left_nonsmi);
- // Branch if not a heapnumber.
- __ cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
- deferred->Branch(not_equal);
-
- // Load integer value into answer register using truncation.
- __ cvttsd2si(answer.reg(),
- FieldOperand(answer.reg(), HeapNumber::kValueOffset));
- // Branch if we do not fit in a smi.
- __ cmp(answer.reg(), 0xc0000000);
- deferred->Branch(negative);
- } else {
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
- left_type_info, right_type_info, deferred);
-
- // Untag both operands.
- __ mov(answer.reg(), left->reg());
- __ SmiUntag(answer.reg());
- }
+ // Untag both operands.
+ __ mov(answer.reg(), left->reg());
+ __ SmiUntag(answer.reg());
+ __ SmiUntag(right->reg()); // Right is ecx.
- __ bind(&do_op);
- __ SmiUntag(ecx);
// Perform the operation.
+ ASSERT(right->reg().is(ecx));
switch (op) {
- case Token::SAR:
+ case Token::SAR: {
__ sar_cl(answer.reg());
- // No checks of result necessary
+ if (!left_type_info.IsSmi()) {
+ // Check that the *signed* result fits in a smi.
+ __ cmp(answer.reg(), 0xc0000000);
+ deferred->JumpToAnswerOutOfRange(negative);
+ }
break;
+ }
case Token::SHR: {
- Label result_ok;
__ shr_cl(answer.reg());
// Check that the *unsigned* result fits in a smi. Neither of
// the two high-order bits can be set:
@@ -1773,21 +1983,14 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
// case. The low bit of the left argument may be lost, but only
// in a case where it is dropped anyway.
__ test(answer.reg(), Immediate(0xc0000000));
- __ j(zero, &result_ok);
- __ SmiTag(ecx);
- deferred->Jump();
- __ bind(&result_ok);
+ deferred->JumpToAnswerOutOfRange(not_zero);
break;
}
case Token::SHL: {
- Label result_ok;
__ shl_cl(answer.reg());
// Check that the *signed* result fits in a smi.
__ cmp(answer.reg(), 0xc0000000);
- __ j(positive, &result_ok);
- __ SmiTag(ecx);
- deferred->Jump();
- __ bind(&result_ok);
+ deferred->JumpToAnswerOutOfRange(negative);
break;
}
default:
@@ -1805,6 +2008,9 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
// Handle the other binary operations.
left->ToRegister();
right->ToRegister();
+ // DeferredInlineBinaryOperation requires all the registers that it is told
+ // about to be spilled.
+ Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
// A newly allocated register answer is used to hold the answer. The
// registers containing left and right are not modified so they don't
// need to be spilled in the fast case.
@@ -1816,12 +2022,16 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
new DeferredInlineBinaryOperation(op,
answer.reg(),
left->reg(),
- right->reg(),
+ distinct_right.reg(),
left_type_info,
right_type_info,
overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
- left_type_info, right_type_info, deferred);
+ Label non_smi_bit_op;
+ if (op != Token::BIT_OR) {
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
+ left_type_info, right_type_info,
+ deferred->NonSmiInputLabel());
+ }
__ mov(answer.reg(), left->reg());
switch (op) {
@@ -1864,6 +2074,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
case Token::BIT_OR:
__ or_(answer.reg(), Operand(right->reg()));
+ __ test(answer.reg(), Immediate(kSmiTagMask));
+ __ j(not_zero, deferred->NonSmiInputLabel());
break;
case Token::BIT_AND:
@@ -1878,6 +2090,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
UNREACHABLE();
break;
}
+
deferred->BindExit();
left->Unuse();
right->Unuse();
@@ -2363,27 +2576,25 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
case Token::BIT_XOR:
case Token::BIT_AND: {
operand->ToRegister();
+ // DeferredInlineBinaryOperation requires all the registers that it is
+ // told about to be spilled.
frame_->Spill(operand->reg());
- DeferredCode* deferred = NULL;
- if (reversed) {
- deferred =
- new DeferredInlineSmiOperationReversed(op,
- operand->reg(),
- smi_value,
- operand->reg(),
- operand->type_info(),
- overwrite_mode);
- } else {
- deferred = new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- }
+ DeferredInlineBinaryOperation* deferred = NULL;
if (!operand->type_info().IsSmi()) {
+ Result left = allocator()->Allocate();
+ ASSERT(left.is_valid());
+ Result right = allocator()->Allocate();
+ ASSERT(right.is_valid());
+ deferred = new DeferredInlineBinaryOperation(
+ op,
+ operand->reg(),
+ left.reg(),
+ right.reg(),
+ operand->type_info(),
+ TypeInfo::Smi(),
+ overwrite_mode == NO_OVERWRITE ? NO_OVERWRITE : OVERWRITE_LEFT);
__ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ deferred->JumpToConstantRhs(not_zero, smi_value);
} else if (FLAG_debug_code) {
__ AbortIfNotSmi(operand->reg());
}
@@ -2399,7 +2610,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
__ or_(Operand(operand->reg()), Immediate(value));
}
}
- deferred->BindExit();
+ if (deferred != NULL) deferred->BindExit();
answer = *operand;
break;
}
@@ -3212,10 +3423,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
__ j(zero, &build_args);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &build_args);
- __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ cmp(FieldOperand(ecx, SharedFunctionInfo::kCodeOffset),
- Immediate(apply_code));
+ __ cmp(FieldOperand(eax, JSFunction::kCodeOffset), Immediate(apply_code));
__ j(not_equal, &build_args);
// Check that applicand is a function.
@@ -4389,7 +4598,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
__ mov(ebx, Operand(eax));
// If the property has been removed while iterating, we just skip it.
- __ cmp(ebx, Factory::null_value());
+ __ test(ebx, Operand(ebx));
node->continue_target()->Branch(equal);
end_del_check.Bind();
@@ -4397,10 +4606,11 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// loop. edx: i'th entry of the enum cache (or string thereof)
frame_->EmitPush(ebx);
{ Reference each(this, node->each());
- // Loading a reference may leave the frame in an unspilled state.
- frame_->SpillAll();
if (!each.is_illegal()) {
if (each.size() > 0) {
+ // Loading a reference may leave the frame in an unspilled state.
+ frame_->SpillAll();
+ // Get the value (under the reference on the stack) from memory.
frame_->EmitPush(frame_->ElementAt(each.size()));
each.SetValue(NOT_CONST_INIT);
frame_->Drop(2);
@@ -6539,7 +6749,7 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
}
- void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
// typeof(arg) == function).
@@ -6560,6 +6770,143 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
}
+// Deferred code to check whether a String wrapper object is safe to use the
+// default valueOf behavior on. This code is called after the map bit caching
+// this information has already been checked, with the map of the object held
+// in the map_result_ register. On return the register map_result_ contains 1
+// for true and 0 for false.
+class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
+ public:
+ DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
+ Register map_result,
+ Register scratch1,
+ Register scratch2)
+ : object_(object),
+ map_result_(map_result),
+ scratch1_(scratch1),
+ scratch2_(scratch2) { }
+
+ virtual void Generate() {
+ Label false_result;
+
+ // Check that map is loaded as expected.
+ if (FLAG_debug_code) {
+ __ cmp(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ Assert(equal, "Map not in expected register");
+ }
+
+ // Check for fast case object. Generate false result for slow case object.
+ __ mov(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
+ __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
+ __ cmp(scratch1_, Factory::hash_table_map());
+ __ j(equal, &false_result);
+
+ // Look for valueOf symbol in the descriptor array, and indicate false if
+ // found. The type is not checked, so if it is a transition it is a false
+ // negative.
+ __ mov(map_result_,
+ FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
+ __ mov(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
+ // map_result_: descriptor array
+ // scratch1_: length of descriptor array
+ // Calculate the end of the descriptor array.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kPointerSize == 4);
+ __ lea(scratch1_,
+ Operand(map_result_, scratch1_, times_2, FixedArray::kHeaderSize));
+ // Calculate location of the first key name.
+ __ add(Operand(map_result_),
+ Immediate(FixedArray::kHeaderSize +
+ DescriptorArray::kFirstIndex * kPointerSize));
+ // Loop through all the keys in the descriptor array. If one of them is the
+ // valueOf symbol, the result is false.
+ Label entry, loop;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(scratch2_, FieldOperand(map_result_, 0));
+ __ cmp(scratch2_, Factory::value_of_symbol());
+ __ j(equal, &false_result);
+ __ add(Operand(map_result_), Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmp(map_result_, Operand(scratch1_));
+ __ j(not_equal, &loop);
+
+ // Reload map as register map_result_ was used as temporary above.
+ __ mov(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
+
+ // If a valueOf property is not found on the object, check that its
+ // prototype is the unmodified String prototype. If not, the result is false.
+ __ mov(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset));
+ __ test(scratch1_, Immediate(kSmiTagMask));
+ __ j(zero, &false_result);
+ __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
+ __ mov(scratch2_, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(scratch2_,
+ FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset));
+ __ cmp(scratch1_,
+ CodeGenerator::ContextOperand(
+ scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ j(not_equal, &false_result);
+ // Set the bit in the map to indicate that it has been checked safe for
+ // default valueOf and set true result.
+ __ or_(FieldOperand(map_result_, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ Set(map_result_, Immediate(1));
+ __ jmp(exit_label());
+ __ bind(&false_result);
+ // Set false result.
+ __ Set(map_result_, Immediate(0));
+ }
+
+ private:
+ Register object_;
+ Register map_result_;
+ Register scratch1_;
+ Register scratch2_;
+};
+
+
+void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop(); // Pop the string wrapper.
+ obj.ToRegister();
+ ASSERT(obj.is_valid());
+ if (FLAG_debug_code) {
+ __ AbortIfSmi(obj.reg());
+ }
+
+ // Check whether this map has already been checked to be safe for default
+ // valueOf.
+ Result map_result = allocator()->Allocate();
+ ASSERT(map_result.is_valid());
+ __ mov(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ __ test_b(FieldOperand(map_result.reg(), Map::kBitField2Offset),
+ 1 << Map::kStringWrapperSafeForDefaultValueOf);
+ destination()->true_target()->Branch(not_zero);
+
+ // We need an additional two scratch registers for the deferred code.
+ Result temp1 = allocator()->Allocate();
+ ASSERT(temp1.is_valid());
+ Result temp2 = allocator()->Allocate();
+ ASSERT(temp2.is_valid());
+
+ DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
+ new DeferredIsStringWrapperSafeForDefaultValueOf(
+ obj.reg(), map_result.reg(), temp1.reg(), temp2.reg());
+ deferred->Branch(zero);
+ deferred->BindExit();
+ __ test(map_result.reg(), Operand(map_result.reg()));
+ obj.Unuse();
+ map_result.Unuse();
+ temp1.Unuse();
+ temp2.Unuse();
+ destination()->Split(not_equal);
+}
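
The fast path tests a bit in the map, and the deferred code both computes the answer and, on success, sets the bit so the next check short-circuits. A hedged sketch of that caching protocol (hypothetical types and bit position, not the real Map layout):

    #include <cassert>

    struct Map {
      unsigned bit_field2 = 0;
      static const unsigned kSafeBit = 1u << 3;  // illustrative bit position
    };

    bool SlowCheck(const Map&) { return true; }  // stands in for the deferred code

    bool IsSafeForDefaultValueOf(Map* map) {
      if (map->bit_field2 & Map::kSafeBit) return true;  // fast path
      if (!SlowCheck(*map)) return false;  // a false answer is never cached
      map->bit_field2 |= Map::kSafeBit;    // cache the positive result
      return true;
    }

    int main() {
      Map m;
      assert(IsSafeForDefaultValueOf(&m));
      assert(m.bit_field2 & Map::kSafeBit);  // second call hits the fast path
    }

Caching only the positive answer keeps the bit safe: modifying the object later (for example, adding a valueOf property) transitions it to a different map, so the bit in the old map stays correct for the objects that still use it.
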
+
+
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (%_ClassOf(arg) === 'Function')
@@ -9467,6 +9814,11 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
__ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+ __ mov(FieldOperand(eax, JSFunction::kCodeOffset), edx);
+
// Return and remove the on-stack parameter.
__ ret(1 * kPointerSize);
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 2368b23c9..37b70110c 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -358,6 +358,10 @@ class CodeGenerator: public AstVisitor {
return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
}
+ static Operand ContextOperand(Register context, int index) {
+ return Operand(context, Context::SlotOffset(index));
+ }
+
private:
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
@@ -430,10 +434,6 @@ class CodeGenerator: public AstVisitor {
// The following are used by class Reference.
void LoadReference(Reference* ref);
- static Operand ContextOperand(Register context, int index) {
- return Operand(context, Context::SlotOffset(index));
- }
-
Operand SlotOperand(Slot* slot, Register tmp);
Operand ContextSlotOperandCheckExtensions(Slot* slot,
@@ -530,7 +530,7 @@ class CodeGenerator: public AstVisitor {
// Emits code sequence that jumps to deferred code if the inputs
// are not both smis. Cannot be in MacroAssembler because it takes
- // advantage of TypeInfo to skip unneeded checks.
+ // a deferred code object.
void JumpIfNotBothSmiUsingTypeInfo(Register left,
Register right,
Register scratch,
@@ -538,6 +538,15 @@ class CodeGenerator: public AstVisitor {
TypeInfo right_info,
DeferredCode* deferred);
+ // Emits code sequence that jumps to the label if the inputs
+ // are not both smis.
+ void JumpIfNotBothSmiUsingTypeInfo(Register left,
+ Register right,
+ Register scratch,
+ TypeInfo left_info,
+ TypeInfo right_info,
+ Label* on_non_smi);
+
// If possible, combine two constant smi values using op to produce
// a smi result, and push it on the virtual frame, all at compile time.
// Returns true if it succeeds. Otherwise it has no effect.
@@ -644,6 +653,8 @@ class CodeGenerator: public AstVisitor {
void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
+ void GenerateIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -802,6 +813,18 @@ class TranscendentalCacheStub: public CodeStub {
};
+class ToBooleanStub: public CodeStub {
+ public:
+ ToBooleanStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return 0; }
+};
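
CodeStub instances are cached by a key built from MajorKey and MinorKey, so a parameterless stub like ToBooleanStub is compiled once and reused. A sketch of that keying idea (the bit packing here is illustrative, not the real encoding):

    #include <cassert>
    #include <map>

    // Illustrative stub-cache keying: one compiled artifact per (major, minor).
    enum Major { ToBoolean = 7 };  // value is illustrative

    int StubKey(int major, int minor) { return (minor << 6) | major; }

    int main() {
      std::map<int, const char*> cache;
      cache[StubKey(ToBoolean, 0)] = "compiled ToBoolean code";
      // A second ToBooleanStub() maps to the same key and reuses the code.
      assert(cache.count(StubKey(ToBoolean, 0)) == 1);
    }
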
+
+
// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags {
NO_GENERIC_BINARY_FLAGS = 0,
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index dfa663420..b57cf3d07 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -254,32 +254,20 @@ void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
}
-// FrameDropper is a code replacement for a JavaScript frame with possibly
-// several frames above.
-// There is no calling conventions here, because it never actually gets called,
-// it only gets returned to.
-// Frame structure (conforms InternalFrame structure):
-// -- JSFunction
-// -- code
-// -- SMI maker
-// -- context
-// -- frame base
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
ExternalReference restarter_frame_function_slot =
ExternalReference(Debug_Address::RestarterFrameFunctionPointer());
__ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
// We do not know our frame height, but set esp based on ebp.
- __ lea(esp, Operand(ebp, -4 * kPointerSize));
+ __ lea(esp, Operand(ebp, -1 * kPointerSize));
- __ pop(edi); // function
-
- // Skip code self-reference and marker.
- __ add(Operand(esp), Immediate(2 * kPointerSize));
-
- __ pop(esi); // Context.
+ __ pop(edi); // Function.
__ pop(ebp);
+ // Load context from the function.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
// Get function code.
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
@@ -289,27 +277,9 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ jmp(Operand(edx));
}
-#undef __
-
-
-// TODO(LiveEdit): consider making it platform-independent.
-// TODO(LiveEdit): use more named constants instead of numbers.
-Object** Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
- Handle<Code> code) {
- ASSERT(bottom_js_frame->is_java_script());
-
- Address fp = bottom_js_frame->fp();
- Memory::Object_at(fp - 4 * kPointerSize) =
- Memory::Object_at(fp - 2 * kPointerSize); // Move edi (function).
-
- Memory::Object_at(fp - 3 * kPointerSize) = *code;
- Memory::Object_at(fp - 2 * kPointerSize) = Smi::FromInt(StackFrame::INTERNAL);
-
- return reinterpret_cast<Object**>(&Memory::Object_at(fp - 4 * kPointerSize));
-}
-
-const int Debug::kFrameDropperFrameSize = 5;
+const bool Debug::kFrameDropperSupported = true;
+#undef __
#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index dc4c27e88..64305ef69 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -560,6 +560,7 @@ int DisassemblerIA32::D1D3C1Instruction(byte* data) {
case kROL: mnem = "rol"; break;
case kROR: mnem = "ror"; break;
case kRCL: mnem = "rcl"; break;
+ case kRCR: mnem = "rcr"; break;
case kSHL: mnem = "shl"; break;
case KSHR: mnem = "shr"; break;
case kSAR: mnem = "sar"; break;
diff --git a/deps/v8/src/ia32/fast-codegen-ia32.cc b/deps/v8/src/ia32/fast-codegen-ia32.cc
deleted file mode 100644
index b749e594b..000000000
--- a/deps/v8/src/ia32/fast-codegen-ia32.cc
+++ /dev/null
@@ -1,954 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "fast-codegen.h"
-#include "data-flow.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-#define BAILOUT(reason) \
- do { \
- if (FLAG_trace_bailout) { \
- PrintF("%s\n", reason); \
- } \
- has_supported_syntax_ = false; \
- return; \
- } while (false)
-
-
-#define CHECK_BAILOUT \
- do { \
- if (!has_supported_syntax_) return; \
- } while (false)
-
-
-void FastCodeGenSyntaxChecker::Check(CompilationInfo* info) {
- info_ = info;
-
- // We do not specialize if we do not have a receiver or if it is not a
- // JS object with fast mode properties.
- if (!info->has_receiver()) BAILOUT("No receiver");
- if (!info->receiver()->IsJSObject()) BAILOUT("Receiver is not an object");
- Handle<JSObject> object = Handle<JSObject>::cast(info->receiver());
- if (!object->HasFastProperties()) BAILOUT("Receiver is in dictionary mode");
-
- // We do not support stack or heap slots (both of which require
- // allocation).
- Scope* scope = info->scope();
- if (scope->num_stack_slots() > 0) {
- BAILOUT("Function has stack-allocated locals");
- }
- if (scope->num_heap_slots() > 0) {
- BAILOUT("Function has context-allocated locals");
- }
-
- VisitDeclarations(scope->declarations());
- CHECK_BAILOUT;
-
- // We do not support empty function bodies.
- if (info->function()->body()->is_empty()) {
- BAILOUT("Function has an empty body");
- }
- VisitStatements(info->function()->body());
-}
-
-
-void FastCodeGenSyntaxChecker::VisitDeclarations(
- ZoneList<Declaration*>* decls) {
- if (!decls->is_empty()) BAILOUT("Function has declarations");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitStatements(ZoneList<Statement*>* stmts) {
- if (stmts->length() != 1) {
- BAILOUT("Function body is not a singleton statement.");
- }
- Visit(stmts->at(0));
-}
-
-
-void FastCodeGenSyntaxChecker::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenSyntaxChecker::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void FastCodeGenSyntaxChecker::VisitExpressionStatement(
- ExpressionStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void FastCodeGenSyntaxChecker::VisitEmptyStatement(EmptyStatement* stmt) {
- // Supported.
-}
-
-
-void FastCodeGenSyntaxChecker::VisitIfStatement(IfStatement* stmt) {
- BAILOUT("IfStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitContinueStatement(ContinueStatement* stmt) {
- BAILOUT("Continuestatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitBreakStatement(BreakStatement* stmt) {
- BAILOUT("BreakStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitReturnStatement(ReturnStatement* stmt) {
- BAILOUT("ReturnStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitWithEnterStatement(
- WithEnterStatement* stmt) {
- BAILOUT("WithEnterStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitWithExitStatement(WithExitStatement* stmt) {
- BAILOUT("WithExitStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitSwitchStatement(SwitchStatement* stmt) {
- BAILOUT("SwitchStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitDoWhileStatement(DoWhileStatement* stmt) {
- BAILOUT("DoWhileStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitWhileStatement(WhileStatement* stmt) {
- BAILOUT("WhileStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitForStatement(ForStatement* stmt) {
- BAILOUT("ForStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitForInStatement(ForInStatement* stmt) {
- BAILOUT("ForInStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitTryCatchStatement(TryCatchStatement* stmt) {
- BAILOUT("TryCatchStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitTryFinallyStatement(
- TryFinallyStatement* stmt) {
- BAILOUT("TryFinallyStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitDebuggerStatement(
- DebuggerStatement* stmt) {
- BAILOUT("DebuggerStatement");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
- BAILOUT("FunctionLiteral");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- BAILOUT("SharedFunctionInfoLiteral");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitConditional(Conditional* expr) {
- BAILOUT("Conditional");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitSlot(Slot* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) {
- // Only global variable references are supported.
- Variable* var = expr->var();
- if (!var->is_global() || var->is_this()) BAILOUT("Non-global variable");
-
-  // Check that the global variable exists and is non-deletable.
- if (info()->has_global_object()) {
- LookupResult lookup;
- info()->global_object()->Lookup(*expr->name(), &lookup);
- if (!lookup.IsProperty()) {
- BAILOUT("Non-existing global variable");
- }
- // We do not handle global variables with accessors or interceptors.
- if (lookup.type() != NORMAL) {
- BAILOUT("Global variable with accessors or interceptors.");
- }
- // We do not handle deletable global variables.
- if (!lookup.IsDontDelete()) {
- BAILOUT("Deletable global variable");
- }
- }
-}
-
-
-void FastCodeGenSyntaxChecker::VisitLiteral(Literal* expr) {
- BAILOUT("Literal");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitRegExpLiteral(RegExpLiteral* expr) {
- BAILOUT("RegExpLiteral");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitObjectLiteral(ObjectLiteral* expr) {
- BAILOUT("ObjectLiteral");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitArrayLiteral(ArrayLiteral* expr) {
- BAILOUT("ArrayLiteral");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
- BAILOUT("CatchExtensionObject");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) {
- // Simple assignments to (named) this properties are supported.
- if (expr->op() != Token::ASSIGN) BAILOUT("Non-simple assignment");
-
- Property* prop = expr->target()->AsProperty();
- if (prop == NULL) BAILOUT("Non-property assignment");
- VariableProxy* proxy = prop->obj()->AsVariableProxy();
- if (proxy == NULL || !proxy->var()->is_this()) {
- BAILOUT("Non-this-property assignment");
- }
- if (!prop->key()->IsPropertyName()) {
- BAILOUT("Non-named-property assignment");
- }
-
- // We will only specialize for fields on the object itself.
- // Expression::IsPropertyName implies that the name is a literal
- // symbol but we do not assume that.
- Literal* key = prop->key()->AsLiteral();
- if (key != NULL && key->handle()->IsString()) {
- Handle<Object> receiver = info()->receiver();
- Handle<String> name = Handle<String>::cast(key->handle());
- LookupResult lookup;
- receiver->Lookup(*name, &lookup);
- if (!lookup.IsProperty()) {
- BAILOUT("Assigned property not found at compile time");
- }
- if (lookup.holder() != *receiver) BAILOUT("Non-own property assignment");
-    if (lookup.type() != FIELD) BAILOUT("Non-field property assignment");
- } else {
- UNREACHABLE();
- BAILOUT("Unexpected non-string-literal property key");
- }
-
- Visit(expr->value());
-}
-
-
-void FastCodeGenSyntaxChecker::VisitThrow(Throw* expr) {
- BAILOUT("Throw");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitProperty(Property* expr) {
- // We support named this property references.
- VariableProxy* proxy = expr->obj()->AsVariableProxy();
- if (proxy == NULL || !proxy->var()->is_this()) {
- BAILOUT("Non-this-property reference");
- }
- if (!expr->key()->IsPropertyName()) {
- BAILOUT("Non-named-property reference");
- }
-
- // We will only specialize for fields on the object itself.
- // Expression::IsPropertyName implies that the name is a literal
- // symbol but we do not assume that.
- Literal* key = expr->key()->AsLiteral();
- if (key != NULL && key->handle()->IsString()) {
- Handle<Object> receiver = info()->receiver();
- Handle<String> name = Handle<String>::cast(key->handle());
- LookupResult lookup;
- receiver->Lookup(*name, &lookup);
- if (!lookup.IsProperty()) {
- BAILOUT("Referenced property not found at compile time");
- }
- if (lookup.holder() != *receiver) BAILOUT("Non-own property reference");
-    if (lookup.type() != FIELD) BAILOUT("Non-field property reference");
- } else {
- UNREACHABLE();
- BAILOUT("Unexpected non-string-literal property key");
- }
-}
-
-
-void FastCodeGenSyntaxChecker::VisitCall(Call* expr) {
- BAILOUT("Call");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitCallNew(CallNew* expr) {
- BAILOUT("CallNew");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitCallRuntime(CallRuntime* expr) {
- BAILOUT("CallRuntime");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitUnaryOperation(UnaryOperation* expr) {
- BAILOUT("UnaryOperation");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitCountOperation(CountOperation* expr) {
- BAILOUT("CountOperation");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) {
-  // Only bitwise OR is supported.
- switch (expr->op()) {
- case Token::COMMA:
- BAILOUT("BinaryOperation COMMA");
- case Token::OR:
- BAILOUT("BinaryOperation OR");
- case Token::AND:
- BAILOUT("BinaryOperation AND");
-
- case Token::BIT_OR:
- // We support expressions nested on the left because they only require
- // a pair of registers to keep all intermediate values in registers
- // (i.e., the expression stack has height no more than two).
- if (!expr->right()->IsLeaf()) BAILOUT("expression nested on right");
-
- // We do not allow subexpressions with side effects because we
- // (currently) bail out to the beginning of the full function. The
- // only expressions with side effects that we would otherwise handle
- // are assignments.
- if (expr->left()->AsAssignment() != NULL ||
- expr->right()->AsAssignment() != NULL) {
- BAILOUT("subexpression of binary operation has side effects");
- }
-
- Visit(expr->left());
- CHECK_BAILOUT;
- Visit(expr->right());
- break;
-
- case Token::BIT_XOR:
- BAILOUT("BinaryOperation BIT_XOR");
- case Token::BIT_AND:
- BAILOUT("BinaryOperation BIT_AND");
- case Token::SHL:
- BAILOUT("BinaryOperation SHL");
- case Token::SAR:
- BAILOUT("BinaryOperation SAR");
- case Token::SHR:
- BAILOUT("BinaryOperation SHR");
- case Token::ADD:
- BAILOUT("BinaryOperation ADD");
- case Token::SUB:
- BAILOUT("BinaryOperation SUB");
- case Token::MUL:
- BAILOUT("BinaryOperation MUL");
- case Token::DIV:
- BAILOUT("BinaryOperation DIV");
- case Token::MOD:
- BAILOUT("BinaryOperation MOD");
- default:
- UNREACHABLE();
- }
-}
-
-
-void FastCodeGenSyntaxChecker::VisitCompareOperation(CompareOperation* expr) {
- BAILOUT("CompareOperation");
-}
-
-
-void FastCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
- BAILOUT("ThisFunction");
-}
-
-#undef BAILOUT
-#undef CHECK_BAILOUT
-
-
-#define __ ACCESS_MASM(masm())
-
-Handle<Code> FastCodeGenerator::MakeCode(CompilationInfo* info) {
- // Label the AST before calling MakeCodePrologue, so AST node numbers are
- // printed with the AST.
- AstLabeler labeler;
- labeler.Label(info);
-
- CodeGenerator::MakeCodePrologue(info);
-
- const int kInitialBufferSize = 4 * KB;
- MacroAssembler masm(NULL, kInitialBufferSize);
-
- // Generate the fast-path code.
- FastCodeGenerator fast_cgen(&masm);
- fast_cgen.Generate(info);
- if (fast_cgen.HasStackOverflow()) {
- ASSERT(!Top::has_pending_exception());
- return Handle<Code>::null();
- }
-
- // Generate the full code for the function in bailout mode, using the same
- // macro assembler.
- CodeGenerator cgen(&masm);
- CodeGeneratorScope scope(&cgen);
- info->set_mode(CompilationInfo::SECONDARY);
- cgen.Generate(info);
- if (cgen.HasStackOverflow()) {
- ASSERT(!Top::has_pending_exception());
- return Handle<Code>::null();
- }
-
- Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
- return CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
-}
-
-
-Register FastCodeGenerator::accumulator0() { return eax; }
-Register FastCodeGenerator::accumulator1() { return edx; }
-Register FastCodeGenerator::scratch0() { return ecx; }
-Register FastCodeGenerator::scratch1() { return edi; }
-Register FastCodeGenerator::receiver_reg() { return ebx; }
-Register FastCodeGenerator::context_reg() { return esi; }
-
-
-void FastCodeGenerator::EmitLoadReceiver() {
- // Offset 2 is due to return address and saved frame pointer.
- int index = 2 + function()->scope()->num_parameters();
- __ mov(receiver_reg(), Operand(ebp, index * kPointerSize));
-}
-
-
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
- ASSERT(!destination().is(no_reg));
- ASSERT(cell->IsJSGlobalPropertyCell());
-
- __ mov(destination(), Immediate(cell));
- __ mov(destination(),
- FieldOperand(destination(), JSGlobalPropertyCell::kValueOffset));
- if (FLAG_debug_code) {
- __ cmp(destination(), Factory::the_hole_value());
- __ Check(not_equal, "DontDelete cells can't contain the hole");
- }
-
- // The loaded value is not known to be a smi.
- clear_as_smi(destination());
-}
-
-
-void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
- LookupResult lookup;
- info()->receiver()->Lookup(*name, &lookup);
-
- ASSERT(lookup.holder() == *info()->receiver());
- ASSERT(lookup.type() == FIELD);
- Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
- int index = lookup.GetFieldIndex() - map->inobject_properties();
- int offset = index * kPointerSize;
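-  // The field index is relative to the start of the out-of-object property
-  // array, so properties stored inside the object itself yield a negative
-  // index (and a negative offset below).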
-
- // We will emit the write barrier unless the stored value is statically
- // known to be a smi.
- bool needs_write_barrier = !is_smi(accumulator0());
-
- // Perform the store. Negative offsets are inobject properties.
- if (offset < 0) {
- offset += map->instance_size();
- __ mov(FieldOperand(receiver_reg(), offset), accumulator0());
- if (needs_write_barrier) {
- // Preserve receiver from write barrier.
- __ mov(scratch0(), receiver_reg());
- }
- } else {
- offset += FixedArray::kHeaderSize;
- __ mov(scratch0(),
- FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
- __ mov(FieldOperand(scratch0(), offset), accumulator0());
- }
-
- if (needs_write_barrier) {
- if (destination().is(no_reg)) {
-      // After RecordWrite accumulator0 is only accidentally a smi, but it is
-      // already marked as not known to be one.
- __ RecordWrite(scratch0(), offset, accumulator0(), scratch1());
- } else {
- // Copy the value to the other accumulator to preserve a copy from the
- // write barrier. One of the accumulators is available as a scratch
- // register. Neither is a smi.
- __ mov(accumulator1(), accumulator0());
- clear_as_smi(accumulator1());
- Register value_scratch = other_accumulator(destination());
- __ RecordWrite(scratch0(), offset, value_scratch, scratch1());
- }
- } else if (destination().is(accumulator1())) {
- __ mov(accumulator1(), accumulator0());
-    // Known to be a smi because we did not need the write barrier.
- set_as_smi(accumulator1());
- }
-}
-
-
-void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
- ASSERT(!destination().is(no_reg));
- LookupResult lookup;
- info()->receiver()->Lookup(*name, &lookup);
-
- ASSERT(lookup.holder() == *info()->receiver());
- ASSERT(lookup.type() == FIELD);
- Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
- int index = lookup.GetFieldIndex() - map->inobject_properties();
- int offset = index * kPointerSize;
-
- // Perform the load. Negative offsets are inobject properties.
- if (offset < 0) {
- offset += map->instance_size();
- __ mov(destination(), FieldOperand(receiver_reg(), offset));
- } else {
- offset += FixedArray::kHeaderSize;
- __ mov(scratch0(),
- FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
- __ mov(destination(), FieldOperand(scratch0(), offset));
- }
-
- // The loaded value is not known to be a smi.
- clear_as_smi(destination());
-}
-
-
-void FastCodeGenerator::EmitBitOr() {
- if (is_smi(accumulator0()) && is_smi(accumulator1())) {
-    // If both operands are known to be smis then there is no need to check
-    // the operands or the result.  In an effect context there is no need to
-    // perform the operation at all.
- if (!destination().is(no_reg)) {
- // Leave the result in the destination register. Bitwise or is
- // commutative.
- __ or_(destination(), Operand(other_accumulator(destination())));
- }
- } else {
- // Left is in accumulator1, right in accumulator0.
- Label* bailout = NULL;
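-    // The or instruction clobbers its destination register, so keep a copy
-    // of the clobbered operand in scratch0; the bailout must receive the
-    // original left and right values.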
- if (destination().is(accumulator0())) {
- __ mov(scratch0(), accumulator0());
- __ or_(destination(), Operand(accumulator1())); // Or is commutative.
- __ test(destination(), Immediate(kSmiTagMask));
- bailout = info()->AddBailout(accumulator1(), scratch0()); // Left, right.
- } else if (destination().is(accumulator1())) {
- __ mov(scratch0(), accumulator1());
- __ or_(destination(), Operand(accumulator0()));
- __ test(destination(), Immediate(kSmiTagMask));
- bailout = info()->AddBailout(scratch0(), accumulator0());
- } else {
- ASSERT(destination().is(no_reg));
- __ mov(scratch0(), accumulator1());
- __ or_(scratch0(), Operand(accumulator0()));
- __ test(scratch0(), Immediate(kSmiTagMask));
- bailout = info()->AddBailout(accumulator1(), accumulator0());
- }
- __ j(not_zero, bailout, not_taken);
- }
-
-  // If we didn't bail out, the result (in fact, both inputs too) is known
-  // to be a smi.
- set_as_smi(accumulator0());
- set_as_smi(accumulator1());
-}
-
-
-void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
- ASSERT(info_ == NULL);
- info_ = compilation_info;
- Comment cmnt(masm_, "[ function compiled by fast code generator");
-
- // Save the caller's frame pointer and set up our own.
- Comment prologue_cmnt(masm(), ";; Prologue");
- __ push(ebp);
- __ mov(ebp, esp);
- __ push(esi); // Context.
- __ push(edi); // Closure.
- // Note that we keep a live register reference to esi (context) at this
- // point.
-
- Label* bailout_to_beginning = info()->AddBailout();
- // Receiver (this) is allocated to a fixed register.
- if (info()->has_this_properties()) {
- Comment cmnt(masm(), ";; MapCheck(this)");
- if (FLAG_print_ir) {
- PrintF("#: MapCheck(this)\n");
- }
- ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
- Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
- Handle<Map> map(object->map());
- EmitLoadReceiver();
- __ CheckMap(receiver_reg(), map, bailout_to_beginning, false);
- }
-
- // If there is a global variable access check if the global object is the
- // same as at lazy-compilation time.
- if (info()->has_globals()) {
- Comment cmnt(masm(), ";; MapCheck(GLOBAL)");
- if (FLAG_print_ir) {
- PrintF("#: MapCheck(GLOBAL)\n");
- }
- ASSERT(info()->has_global_object());
- Handle<Map> map(info()->global_object()->map());
- __ mov(scratch0(), CodeGenerator::GlobalObject());
- __ CheckMap(scratch0(), map, bailout_to_beginning, true);
- }
-
- VisitStatements(function()->body());
-
- Comment return_cmnt(masm(), ";; Return(<undefined>)");
- if (FLAG_print_ir) {
- PrintF("#: Return(<undefined>)\n");
- }
- __ mov(eax, Factory::undefined_value());
- __ mov(esp, ebp);
- __ pop(ebp);
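-  // Pop the parameters and the receiver (hence the + 1).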
- __ ret((scope()->num_parameters() + 1) * kPointerSize);
-}
-
-
-void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
- // Nothing to do.
-}
-
-
-void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitConditional(Conditional* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitSlot(Slot* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- ASSERT(expr->var()->is_global() && !expr->var()->is_this());
- // Check if we can compile a global variable load directly from the cell.
- ASSERT(info()->has_global_object());
- LookupResult lookup;
- info()->global_object()->Lookup(*expr->name(), &lookup);
- // We only support normal (non-accessor/interceptor) DontDelete properties
- // for now.
- ASSERT(lookup.IsProperty());
- ASSERT_EQ(NORMAL, lookup.type());
- ASSERT(lookup.IsDontDelete());
- Handle<Object> cell(info()->global_object()->GetPropertyCell(&lookup));
-
- // Global variable lookups do not have side effects, so we do not need to
- // emit code if we are in an effect context.
- if (!destination().is(no_reg)) {
- Comment cmnt(masm(), ";; Global");
- if (FLAG_print_ir) {
- SmartPointer<char> name = expr->name()->ToCString();
- PrintF("%d: t%d = Global(%s)\n", expr->num(),
- expr->num(), *name);
- }
- EmitGlobalVariableLoad(cell);
- }
-}
-
-
-void FastCodeGenerator::VisitLiteral(Literal* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitAssignment(Assignment* expr) {
- // Known to be a simple this property assignment. Effectively a unary
- // operation.
- { Register my_destination = destination();
- set_destination(accumulator0());
- Visit(expr->value());
- set_destination(my_destination);
- }
-
- Property* prop = expr->target()->AsProperty();
- ASSERT_NOT_NULL(prop);
- ASSERT_NOT_NULL(prop->obj()->AsVariableProxy());
- ASSERT(prop->obj()->AsVariableProxy()->var()->is_this());
- ASSERT(prop->key()->IsPropertyName());
- Handle<String> name =
- Handle<String>::cast(prop->key()->AsLiteral()->handle());
-
- Comment cmnt(masm(), ";; Store to this");
- if (FLAG_print_ir) {
- SmartPointer<char> name_string = name->ToCString();
- PrintF("%d: ", expr->num());
- if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
- PrintF("Store(this, \"%s\", t%d)\n", *name_string,
- expr->value()->num());
- }
-
- EmitThisPropertyStore(name);
-}
-
-
-void FastCodeGenerator::VisitThrow(Throw* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitProperty(Property* expr) {
- ASSERT_NOT_NULL(expr->obj()->AsVariableProxy());
- ASSERT(expr->obj()->AsVariableProxy()->var()->is_this());
- ASSERT(expr->key()->IsPropertyName());
- if (!destination().is(no_reg)) {
- Handle<String> name =
- Handle<String>::cast(expr->key()->AsLiteral()->handle());
-
- Comment cmnt(masm(), ";; Load from this");
- if (FLAG_print_ir) {
- SmartPointer<char> name_string = name->ToCString();
- PrintF("%d: t%d = Load(this, \"%s\")\n",
- expr->num(), expr->num(), *name_string);
- }
- EmitThisPropertyLoad(name);
- }
-}
-
-
-void FastCodeGenerator::VisitCall(Call* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
-  // We support a limited set of binary operations: only bitwise OR, and
-  // only when nested on the left.
- ASSERT(expr->op() == Token::BIT_OR);
- ASSERT(expr->right()->IsLeaf());
-
- { Register my_destination = destination();
- set_destination(accumulator1());
- Visit(expr->left());
- set_destination(accumulator0());
- Visit(expr->right());
- set_destination(my_destination);
- }
-
- Comment cmnt(masm(), ";; BIT_OR");
- if (FLAG_print_ir) {
- PrintF("%d: ", expr->num());
- if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
- PrintF("BIT_OR(t%d, t%d)\n", expr->left()->num(), expr->right()->num());
- }
- EmitBitOr();
-}
-
-
-void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- UNREACHABLE();
-}
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/fast-codegen-ia32.h b/deps/v8/src/ia32/fast-codegen-ia32.h
deleted file mode 100644
index e0851afe0..000000000
--- a/deps/v8/src/ia32/fast-codegen-ia32.h
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FAST_CODEGEN_IA32_H_
-#define V8_FAST_CODEGEN_IA32_H_
-
-#include "v8.h"
-
-#include "ast.h"
-#include "compiler.h"
-#include "list.h"
-
-namespace v8 {
-namespace internal {
-
-class FastCodeGenSyntaxChecker: public AstVisitor {
- public:
- explicit FastCodeGenSyntaxChecker()
- : info_(NULL), has_supported_syntax_(true) {
- }
-
- void Check(CompilationInfo* info);
-
- CompilationInfo* info() { return info_; }
- bool has_supported_syntax() { return has_supported_syntax_; }
-
- private:
- void VisitDeclarations(ZoneList<Declaration*>* decls);
- void VisitStatements(ZoneList<Statement*>* stmts);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- CompilationInfo* info_;
- bool has_supported_syntax_;
-
- DISALLOW_COPY_AND_ASSIGN(FastCodeGenSyntaxChecker);
-};
-
-
-class FastCodeGenerator: public AstVisitor {
- public:
- explicit FastCodeGenerator(MacroAssembler* masm)
- : masm_(masm), info_(NULL), destination_(no_reg), smi_bits_(0) {
- }
-
- static Handle<Code> MakeCode(CompilationInfo* info);
-
- void Generate(CompilationInfo* compilation_info);
-
- private:
- MacroAssembler* masm() { return masm_; }
- CompilationInfo* info() { return info_; }
-
- Register destination() { return destination_; }
- void set_destination(Register reg) { destination_ = reg; }
-
- FunctionLiteral* function() { return info_->function(); }
- Scope* scope() { return info_->scope(); }
-
- // Platform-specific fixed registers, all guaranteed distinct.
- Register accumulator0();
- Register accumulator1();
- Register scratch0();
- Register scratch1();
- Register receiver_reg();
- Register context_reg();
-
- Register other_accumulator(Register reg) {
- ASSERT(reg.is(accumulator0()) || reg.is(accumulator1()));
- return (reg.is(accumulator0())) ? accumulator1() : accumulator0();
- }
-
- // Flags are true if the respective register is statically known to hold a
- // smi. We do not track every register, only the accumulator registers.
- bool is_smi(Register reg) {
- ASSERT(!reg.is(no_reg));
- return (smi_bits_ & reg.bit()) != 0;
- }
- void set_as_smi(Register reg) {
- ASSERT(!reg.is(no_reg));
- smi_bits_ = smi_bits_ | reg.bit();
- }
- void clear_as_smi(Register reg) {
- ASSERT(!reg.is(no_reg));
- smi_bits_ = smi_bits_ & ~reg.bit();
- }
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- // Emit code to load the receiver from the stack into receiver_reg.
- void EmitLoadReceiver();
-
- // Emit code to load a global variable directly from a global property
- // cell into the destination register.
- void EmitGlobalVariableLoad(Handle<Object> cell);
-
- // Emit a store to an own property of this. The stored value is expected
- // in accumulator0 and the receiver in receiver_reg. The receiver
- // register is preserved and the result (the stored value) is left in the
- // destination register.
- void EmitThisPropertyStore(Handle<String> name);
-
- // Emit a load from an own property of this. The receiver is expected in
- // receiver_reg. The receiver register is preserved and the result is
- // left in the destination register.
- void EmitThisPropertyLoad(Handle<String> name);
-
- // Emit a bitwise or operation. The left operand is in accumulator1 and
- // the right is in accumulator0. The result should be left in the
- // destination register.
- void EmitBitOr();
-
- MacroAssembler* masm_;
- CompilationInfo* info_;
-
- Register destination_;
- uint32_t smi_bits_;
-
- DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_FAST_CODEGEN_IA32_H_
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index eb944e6f7..cb36904ee 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -54,97 +54,95 @@ namespace internal {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-ia32.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
- if (mode == PRIMARY) {
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS Function.
-
- { Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = scope()->num_stack_slots();
- if (locals_count == 1) {
- __ push(Immediate(Factory::undefined_value()));
- } else if (locals_count > 1) {
- __ mov(eax, Immediate(Factory::undefined_value()));
- for (int i = 0; i < locals_count; i++) {
- __ push(eax);
- }
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ __ push(edi); // Callee's JS Function.
+
+ { Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = scope()->num_stack_slots();
+ if (locals_count == 1) {
+ __ push(Immediate(Factory::undefined_value()));
+ } else if (locals_count > 1) {
+ __ mov(eax, Immediate(Factory::undefined_value()));
+ for (int i = 0; i < locals_count; i++) {
+ __ push(eax);
}
}
+ }
- bool function_in_register = true;
+ bool function_in_register = true;
- // Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is still in edi.
- __ push(edi);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewContext, 1);
- }
- function_in_register = false;
- // Context is returned in both eax and esi. It replaces the context
- // passed to us. It's saved in the stack and kept live in esi.
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
-
- // Copy parameters into context if necessary.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = scope()->parameter(i)->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ mov(eax, Operand(ebp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(slot->index());
- __ mov(Operand(esi, context_offset), eax);
- // Update the write barrier. This clobbers all involved
-        // registers, so we have to use a third register to avoid
- // clobbering esi.
- __ mov(ecx, esi);
- __ RecordWrite(ecx, context_offset, eax, ebx);
- }
+ // Possibly allocate a local context.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is still in edi.
+ __ push(edi);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewContext, 1);
+ }
+ function_in_register = false;
+ // Context is returned in both eax and esi. It replaces the context
+ // passed to us. It's saved in the stack and kept live in esi.
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+
+ // Copy parameters into context if necessary.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ mov(eax, Operand(ebp, parameter_offset));
+ // Store it in the context.
+ int context_offset = Context::SlotOffset(slot->index());
+ __ mov(Operand(esi, context_offset), eax);
+ // Update the write barrier. This clobbers all involved
+      // registers, so we have to use a third register to avoid
+ // clobbering esi.
+ __ mov(ecx, esi);
+ __ RecordWrite(ecx, context_offset, eax, ebx);
}
}
+ }
- Variable* arguments = scope()->arguments()->AsVariable();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (function_in_register) {
- __ push(edi);
- } else {
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- }
- // Receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
- __ lea(edx,
- Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(edx);
- __ push(Immediate(Smi::FromInt(scope()->num_parameters())));
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ CallStub(&stub);
- __ mov(ecx, eax); // Duplicate result.
- Move(arguments->slot(), eax, ebx, edx);
- Slot* dot_arguments_slot =
- scope()->arguments_shadow()->AsVariable()->slot();
- Move(dot_arguments_slot, ecx, ebx, edx);
+ Variable* arguments = scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (function_in_register) {
+ __ push(edi);
+ } else {
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
+ // Receiver is just before the parameters on the caller's stack.
+ int offset = scope()->num_parameters() * kPointerSize;
+ __ lea(edx,
+ Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
+ __ push(edx);
+ __ push(Immediate(Smi::FromInt(scope()->num_parameters())));
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ __ mov(ecx, eax); // Duplicate result.
+ Move(arguments->slot(), eax, ebx, edx);
+ Slot* dot_arguments_slot =
+ scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, ecx, ebx, edx);
}
{ Comment cmnt(masm_, "[ Declarations");
@@ -1048,7 +1046,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(ecx); // Enumerable.
__ push(ebx); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ cmp(eax, Factory::null_value());
+ __ test(eax, Operand(eax));
__ j(equal, loop_statement.continue_target());
__ mov(ebx, Operand(eax));
@@ -2054,6 +2052,25 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
+ // used in a few functions in runtime.js which should not normally be hit by
+ // this compiler.
+ __ jmp(if_false);
+ Apply(context_, if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index d0eeb7734..24538461f 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -373,7 +373,13 @@ void MacroAssembler::AbortIfNotNumber(Register object) {
void MacroAssembler::AbortIfNotSmi(Register object) {
test(object, Immediate(kSmiTagMask));
- Assert(equal, "Operand not a smi");
+ Assert(equal, "Operand is not a smi");
+}
+
+
+void MacroAssembler::AbortIfSmi(Register object) {
+ test(object, Immediate(kSmiTagMask));
+ Assert(not_equal, "Operand is a smi");
}
@@ -1292,7 +1298,7 @@ void MacroAssembler::InvokeFunction(Register fun,
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
SmiUntag(ebx);
- mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+ mov(edx, FieldOperand(edi, JSFunction::kCodeOffset));
lea(edx, FieldOperand(edx, Code::kHeaderSize));
ParameterCount expected(ebx);
@@ -1344,8 +1350,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
// Make sure the code objects in the builtins object and in the
// builtin function are the same.
push(target);
- mov(target, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- mov(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
+ mov(target, FieldOperand(edi, JSFunction::kCodeOffset));
cmp(target, Operand(esp, 0));
Assert(equal, "Builtin code object changed");
pop(target);
@@ -1510,6 +1515,59 @@ void MacroAssembler::Abort(const char* msg) {
}
+void MacroAssembler::JumpIfNotNumber(Register reg,
+ TypeInfo info,
+ Label* on_not_number) {
+ if (FLAG_debug_code) AbortIfSmi(reg);
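+  // If the static type info already proves the value is a number, the map
+  // check can be skipped.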
+ if (!info.IsNumber()) {
+ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ j(not_equal, on_not_number);
+ }
+}
+
+
+void MacroAssembler::ConvertToInt32(Register dst,
+ Register source,
+ Register scratch,
+ TypeInfo info,
+ Label* on_not_int32) {
+ if (FLAG_debug_code) {
+ AbortIfSmi(source);
+ AbortIfNotNumber(source);
+ }
+ if (info.IsInteger32()) {
+ cvttsd2si(dst, FieldOperand(source, HeapNumber::kValueOffset));
+ } else {
+ Label done;
+ bool push_pop = (scratch.is(no_reg) && dst.is(source));
+ ASSERT(!scratch.is(source));
+ if (push_pop) {
+ push(dst);
+ scratch = dst;
+ }
+ if (scratch.is(no_reg)) scratch = dst;
+ cvttsd2si(scratch, FieldOperand(source, HeapNumber::kValueOffset));
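+    // cvttsd2si yields 0x80000000 for NaN and for doubles outside the int32
+    // range, so that bit pattern signals a failed conversion.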
+ cmp(scratch, 0x80000000u);
+ if (push_pop) {
+ j(not_equal, &done);
+ pop(dst);
+ jmp(on_not_int32);
+ } else {
+ j(equal, on_not_int32);
+ }
+
+ bind(&done);
+ if (push_pop) {
+ add(Operand(esp), Immediate(kPointerSize)); // Pop.
+ }
+ if (!scratch.is(dst)) {
+ mov(dst, scratch);
+ }
+ }
+}
+
+
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
Register instance_type,
Register scratch,
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index a17a2b4c1..0b16f0d40 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -29,6 +29,7 @@
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
#include "assembler.h"
+#include "type-info.h"
namespace v8 {
namespace internal {
@@ -225,12 +226,44 @@ class MacroAssembler: public Assembler {
sar(reg, kSmiTagSize);
}
+ // Modifies the register even if it does not contain a Smi!
+ void SmiUntag(Register reg, TypeInfo info, Label* non_smi) {
+ ASSERT(kSmiTagSize == 1);
+ sar(reg, kSmiTagSize);
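+    // The shift moves the tag bit into the carry flag: carry is set exactly
+    // when the value was not a smi (smis are tagged with a 0 low bit).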
+ if (info.IsSmi()) {
+ ASSERT(kSmiTag == 0);
+ j(carry, non_smi);
+ }
+ }
+
+ // Modifies the register even if it does not contain a Smi!
+ void SmiUntag(Register reg, Label* is_smi) {
+ ASSERT(kSmiTagSize == 1);
+ sar(reg, kSmiTagSize);
+ ASSERT(kSmiTag == 0);
+ j(not_carry, is_smi);
+ }
+
+ // Assumes input is a heap object.
+ void JumpIfNotNumber(Register reg, TypeInfo info, Label* on_not_number);
+
+  // Assumes input is a heap number. Jumps to on_not_int32 for values out of
+  // range, including the minimum negative int32. Ignores fractional parts.
+ void ConvertToInt32(Register dst,
+ Register src, // Can be the same as dst.
+ Register scratch, // Can be no_reg or dst, but not src.
+ TypeInfo info,
+ Label* on_not_int32);
+
// Abort execution if argument is not a number. Used in debug code.
void AbortIfNotNumber(Register object);
// Abort execution if argument is not a smi. Used in debug code.
void AbortIfNotSmi(Register object);
+ // Abort execution if argument is a smi. Used in debug code.
+ void AbortIfSmi(Register object);
+
// ---------------------------------------------------------------------------
// Exception handling
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index c21dd4f00..b2c9dab83 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -1255,30 +1255,6 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
}
-// TODO(1241006): Avoid having lazy compile stubs specialized by the
-// number of arguments. It is not needed anymore.
-Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Push a copy of the function onto the stack.
- __ push(edi);
-
- __ push(edi); // function is also the parameter to the runtime call
- __ CallRuntime(Runtime::kLazyCompile, 1);
- __ pop(edi);
-
- // Tear down temporary frame.
- __ LeaveInternalFrame();
-
- // Do a tail-call of the compiled function.
- __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(Operand(ecx));
-
- return GetCodeWithFlags(flags, "LazyCompileStub");
-}
-
-
void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
__ cmp(Operand(ecx), Immediate(Handle<String>(name)));
@@ -1595,6 +1571,9 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
+ // If object is not a string, bail out to regular call.
+ if (!object->IsString()) return Heap::undefined_value();
+
const int argc = arguments().immediate();
Label miss;
@@ -1605,6 +1584,7 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
eax);
+ ASSERT(object != holder);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, edi, name, &miss);
@@ -1659,6 +1639,9 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
+ // If object is not a string, bail out to regular call.
+ if (!object->IsString()) return Heap::undefined_value();
+
const int argc = arguments().immediate();
Label miss;
@@ -1670,6 +1653,7 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
eax);
+ ASSERT(object != holder);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, edi, name, &miss);
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.h b/deps/v8/src/ia32/virtual-frame-ia32.h
index e00626b7d..b9faa4614 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.h
+++ b/deps/v8/src/ia32/virtual-frame-ia32.h
@@ -139,6 +139,22 @@ class VirtualFrame: public ZoneObject {
if (is_used(reg)) SpillElementAt(register_location(reg));
}
+ // Make the two registers distinct and spill them. Returns the second
+ // register. If the registers were not distinct then it returns the new
+ // second register.
+ Result MakeDistinctAndSpilled(Result* left, Result* right) {
+ Spill(left->reg());
+ Spill(right->reg());
+ if (left->reg().is(right->reg())) {
+ RegisterAllocator* allocator = cgen()->allocator();
+ Result fresh = allocator->Allocate();
+ ASSERT(fresh.is_valid());
+ masm()->mov(fresh.reg(), right->reg());
+ return fresh;
+ }
+ return *right;
+ }
+
// Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register
// (ie, they all have frame-external references).
diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h
index e41db11fc..e277bc872 100644
--- a/deps/v8/src/list-inl.h
+++ b/deps/v8/src/list-inl.h
@@ -127,6 +127,13 @@ void List<T, P>::Iterate(void (*callback)(T* x)) {
template<typename T, class P>
+template<class Visitor>
+void List<T, P>::Iterate(Visitor* visitor) {
+ for (int i = 0; i < length_; i++) visitor->Apply(&data_[i]);
+}
+
+
+template<typename T, class P>
bool List<T, P>::Contains(const T& elm) {
for (int i = 0; i < length_; i++) {
if (data_[i] == elm)
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index d3c2767a5..9abf61ce5 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -117,6 +117,8 @@ class List {
// Iterate through all list entries, starting at index 0.
void Iterate(void (*callback)(T* x));
+ template<class Visitor>
+ void Iterate(Visitor* visitor);
// Sort all list entries (using QuickSort)
void Sort(int (*cmp)(const T* x, const T* y));
diff --git a/deps/v8/src/liveedit-debugger.js b/deps/v8/src/liveedit-debugger.js
index c8c6f082c..be97989bf 100644
--- a/deps/v8/src/liveedit-debugger.js
+++ b/deps/v8/src/liveedit-debugger.js
@@ -800,9 +800,10 @@ Debug.LiveEdit = new function() {
this.end_position = raw_array[2];
this.param_num = raw_array[3];
this.code = raw_array[4];
- this.scope_info = raw_array[5];
- this.outer_index = raw_array[6];
- this.shared_function_info = raw_array[7];
+ this.code_scope_info = raw_array[5];
+ this.scope_info = raw_array[6];
+ this.outer_index = raw_array[7];
+ this.shared_function_info = raw_array[8];
this.next_sibling_index = null;
this.raw_array = raw_array;
}
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index 346d9ea22..769ac35c8 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -32,6 +32,7 @@
#include "compiler.h"
#include "oprofile-agent.h"
#include "scopes.h"
+#include "scopeinfo.h"
#include "global-handles.h"
#include "debug.h"
#include "memory.h"
@@ -500,12 +501,16 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
this->SetSmiValueField(kParamNumOffset_, param_num);
this->SetSmiValueField(kParentIndexOffset_, parent_index);
}
- void SetFunctionCode(Handle<Code> function_code) {
- Handle<JSValue> wrapper = WrapInJSValue(*function_code);
- this->SetField(kCodeOffset_, wrapper);
+ void SetFunctionCode(Handle<Code> function_code,
+ Handle<Object> code_scope_info) {
+ Handle<JSValue> code_wrapper = WrapInJSValue(*function_code);
+ this->SetField(kCodeOffset_, code_wrapper);
+
+ Handle<JSValue> scope_wrapper = WrapInJSValue(*code_scope_info);
+ this->SetField(kCodeScopeInfoOffset_, scope_wrapper);
}
- void SetScopeInfo(Handle<Object> scope_info_array) {
- this->SetField(kScopeInfoOffset_, scope_info_array);
+ void SetOuterScopeInfo(Handle<Object> scope_info_array) {
+ this->SetField(kOuterScopeInfoOffset_, scope_info_array);
}
void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) {
Handle<JSValue> info_holder = WrapInJSValue(*info);
@@ -519,6 +524,11 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
JSValue::cast(this->GetField(kCodeOffset_))));
return Handle<Code>::cast(raw_result);
}
+ Handle<Object> GetCodeScopeInfo() {
+ Handle<Object> raw_result = UnwrapJSValue(Handle<JSValue>(
+ JSValue::cast(this->GetField(kCodeScopeInfoOffset_))));
+ return raw_result;
+ }
int GetStartPosition() {
return this->GetSmiValueField(kStartPositionOffset_);
}
@@ -532,10 +542,11 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
static const int kEndPositionOffset_ = 2;
static const int kParamNumOffset_ = 3;
static const int kCodeOffset_ = 4;
- static const int kScopeInfoOffset_ = 5;
- static const int kParentIndexOffset_ = 6;
- static const int kSharedFunctionInfoOffset_ = 7;
- static const int kSize_ = 8;
+ static const int kCodeScopeInfoOffset_ = 5;
+ static const int kOuterScopeInfoOffset_ = 6;
+ static const int kParentIndexOffset_ = 7;
+ static const int kSharedFunctionInfoOffset_ = 8;
+ static const int kSize_ = 9;
friend class JSArrayBasedStruct<FunctionInfoWrapper>;
};
@@ -671,7 +682,7 @@ class FunctionInfoListener {
void FunctionCode(Handle<Code> function_code) {
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
- info.SetFunctionCode(function_code);
+ info.SetFunctionCode(function_code, Handle<Object>(Heap::null_value()));
}
// Saves full information about a function: its code, its scope info
@@ -682,11 +693,12 @@ class FunctionInfoListener {
}
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
- info.SetFunctionCode(Handle<Code>(shared->code()));
+ info.SetFunctionCode(Handle<Code>(shared->code()),
+ Handle<Object>(shared->scope_info()));
info.SetSharedFunctionInfo(shared);
Handle<Object> scope_info_list(SerializeFunctionScope(scope));
- info.SetScopeInfo(scope_info_list);
+ info.SetOuterScopeInfo(scope_info_list);
}
Handle<JSArray> GetResult() {
@@ -855,6 +867,10 @@ Object* LiveEdit::ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
if (IsJSFunctionCode(shared_info->code())) {
ReplaceCodeObject(shared_info->code(),
*(compile_info_wrapper.GetFunctionCode()));
+ Handle<Object> code_scope_info = compile_info_wrapper.GetCodeScopeInfo();
+ if (code_scope_info->IsFixedArray()) {
+ shared_info->set_scope_info(SerializedScopeInfo::cast(*code_scope_info));
+ }
}
if (shared_info->debug_info()->IsDebugInfo()) {
@@ -1190,7 +1206,7 @@ static const char* DropFrames(Vector<StackFrame*> frames,
int bottom_js_frame_index,
Debug::FrameDropMode* mode,
Object*** restarter_frame_function_pointer) {
- if (Debug::kFrameDropperFrameSize < 0) {
+ if (!Debug::kFrameDropperSupported) {
return "Stack manipulations are not supported in this architecture.";
}
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index d9b0222a4..1a020e55d 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -32,6 +32,7 @@
#include "global-handles.h"
#include "ic-inl.h"
#include "mark-compact.h"
+#include "objects-visiting.h"
#include "stub-cache.h"
namespace v8 {
@@ -63,6 +64,7 @@ int MarkCompactCollector::live_cell_objects_size_ = 0;
int MarkCompactCollector::live_lo_objects_size_ = 0;
#endif
+
void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
@@ -244,14 +246,72 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
}
-// Helper class for marking pointers in HeapObjects.
-class MarkingVisitor : public ObjectVisitor {
+class StaticMarkingVisitor : public StaticVisitorBase {
public:
- void VisitPointer(Object** p) {
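+  // Looks up the body visitor registered for the object's map and calls it
+  // directly, replacing the virtual dispatch of the old MarkingVisitor.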
+ static inline void IterateBody(Map* map, HeapObject* obj) {
+ table_.GetVisitor(map)(map, obj);
+ }
+
+ static void Initialize() {
+ table_.Register(kVisitShortcutCandidate,
+ &FixedBodyVisitor<StaticMarkingVisitor,
+ ConsString::BodyDescriptor,
+ void>::Visit);
+
+ table_.Register(kVisitConsString,
+ &FixedBodyVisitor<StaticMarkingVisitor,
+ ConsString::BodyDescriptor,
+ void>::Visit);
+
+
+ table_.Register(kVisitFixedArray,
+ &FlexibleBodyVisitor<StaticMarkingVisitor,
+ FixedArray::BodyDescriptor,
+ void>::Visit);
+
+ table_.Register(kVisitSharedFunctionInfo,
+ &FixedBodyVisitor<StaticMarkingVisitor,
+ SharedFunctionInfo::BodyDescriptor,
+ void>::Visit);
+
+ table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
+ table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
+ table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
+
+ table_.Register(kVisitOddball,
+ &FixedBodyVisitor<StaticMarkingVisitor,
+ Oddball::BodyDescriptor,
+ void>::Visit);
+ table_.Register(kVisitMap,
+ &FixedBodyVisitor<StaticMarkingVisitor,
+ Map::BodyDescriptor,
+ void>::Visit);
+
+ table_.Register(kVisitCode, &VisitCode);
+
+ table_.Register(kVisitPropertyCell,
+ &FixedBodyVisitor<StaticMarkingVisitor,
+ JSGlobalPropertyCell::BodyDescriptor,
+ void>::Visit);
+
+ table_.RegisterSpecializations<DataObjectVisitor,
+ kVisitDataObject,
+ kVisitDataObjectGeneric>();
+
+ table_.RegisterSpecializations<JSObjectVisitor,
+ kVisitJSObject,
+ kVisitJSObjectGeneric>();
+
+ table_.RegisterSpecializations<StructObjectVisitor,
+ kVisitStruct,
+ kVisitStructGeneric>();
+ }
+
+ INLINE(static void VisitPointer(Object** p)) {
MarkObjectByPointer(p);
}
- void VisitPointers(Object** start, Object** end) {
+ INLINE(static void VisitPointers(Object** start, Object** end)) {
// Mark all objects pointed to in [start, end).
const int kMinRangeForMarkingRecursion = 64;
if (end - start >= kMinRangeForMarkingRecursion) {
@@ -261,7 +321,7 @@ class MarkingVisitor : public ObjectVisitor {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
- void VisitCodeTarget(RelocInfo* rinfo) {
+ static inline void VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
@@ -273,7 +333,7 @@ class MarkingVisitor : public ObjectVisitor {
}
}
- void VisitDebugTarget(RelocInfo* rinfo) {
+ static inline void VisitDebugTarget(RelocInfo* rinfo) {
ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
rinfo->IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
@@ -282,19 +342,15 @@ class MarkingVisitor : public ObjectVisitor {
MarkCompactCollector::MarkObject(code);
}
- private:
// Mark object pointed to by p.
- void MarkObjectByPointer(Object** p) {
+ INLINE(static void MarkObjectByPointer(Object** p)) {
if (!(*p)->IsHeapObject()) return;
HeapObject* object = ShortCircuitConsString(p);
MarkCompactCollector::MarkObject(object);
}
- // Tells whether the mark sweep collection will perform compaction.
- bool IsCompacting() { return MarkCompactCollector::IsCompacting(); }
-
// Visit an unmarked object.
- void VisitUnmarkedObject(HeapObject* obj) {
+ static inline void VisitUnmarkedObject(HeapObject* obj) {
#ifdef DEBUG
ASSERT(Heap::Contains(obj));
ASSERT(!obj->IsMarked());
@@ -303,12 +359,12 @@ class MarkingVisitor : public ObjectVisitor {
MarkCompactCollector::SetMark(obj);
// Mark the map pointer and the body.
MarkCompactCollector::MarkObject(map);
- obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), this);
+ IterateBody(map, obj);
}
// Visit all unmarked objects pointed to by [start, end).
// Returns false if the operation fails (lack of stack space).
- inline bool VisitUnmarkedObjects(Object** start, Object** end) {
+ static inline bool VisitUnmarkedObjects(Object** start, Object** end) {
     // Return false if we are close to the stack limit.
StackLimitCheck check;
if (check.HasOverflowed()) return false;
@@ -322,6 +378,60 @@ class MarkingVisitor : public ObjectVisitor {
}
return true;
}
+
+ static inline void VisitExternalReference(Address* p) { }
+ static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
+
+ private:
+ class DataObjectVisitor {
+ public:
+ template<int size>
+ static void VisitSpecialized(Map* map, HeapObject* object) {
+ }
+
+ static void Visit(Map* map, HeapObject* object) {
+ }
+ };
+
+ typedef FlexibleBodyVisitor<StaticMarkingVisitor,
+ JSObject::BodyDescriptor,
+ void> JSObjectVisitor;
+
+ typedef FlexibleBodyVisitor<StaticMarkingVisitor,
+ StructBodyDescriptor,
+ void> StructObjectVisitor;
+
+ static void VisitCode(Map* map, HeapObject* object) {
+ reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>();
+ }
+
+ typedef void (*Callback)(Map* map, HeapObject* object);
+
+ static VisitorDispatchTable<Callback> table_;
+};
+
+
+VisitorDispatchTable<StaticMarkingVisitor::Callback>
+ StaticMarkingVisitor::table_;
+
+
+class MarkingVisitor : public ObjectVisitor {
+ public:
+ void VisitPointer(Object** p) {
+ StaticMarkingVisitor::VisitPointer(p);
+ }
+
+ void VisitPointers(Object** start, Object** end) {
+ StaticMarkingVisitor::VisitPointers(start, end);
+ }
+
+ void VisitCodeTarget(RelocInfo* rinfo) {
+ StaticMarkingVisitor::VisitCodeTarget(rinfo);
+ }
+
+ void VisitDebugTarget(RelocInfo* rinfo) {
+ StaticMarkingVisitor::VisitDebugTarget(rinfo);
+ }
};
@@ -336,11 +446,7 @@ class RootMarkingVisitor : public ObjectVisitor {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
- MarkingVisitor* stack_visitor() { return &stack_visitor_; }
-
private:
- MarkingVisitor stack_visitor_;
-
void MarkObjectByPointer(Object** p) {
if (!(*p)->IsHeapObject()) return;
@@ -351,14 +457,14 @@ class RootMarkingVisitor : public ObjectVisitor {
Map* map = object->map();
// Mark the object.
MarkCompactCollector::SetMark(object);
+
// Mark the map pointer and body, and push them on the marking stack.
MarkCompactCollector::MarkObject(map);
- object->IterateBody(map->instance_type(), object->SizeFromMap(map),
- &stack_visitor_);
+ StaticMarkingVisitor::IterateBody(map, object);
// Mark all the objects reachable from the map and body. May leave
// overflowed objects in the heap.
- MarkCompactCollector::EmptyMarkingStack(&stack_visitor_);
+ MarkCompactCollector::EmptyMarkingStack();
}
};
@@ -425,11 +531,12 @@ void MarkCompactCollector::MarkMapContents(Map* map) {
// Mark the Object* fields of the Map.
// Since the descriptor array has been marked already, it is fine
// that one of these fields contains a pointer to it.
- MarkingVisitor visitor; // Has no state or contents.
- visitor.VisitPointers(HeapObject::RawField(map,
- Map::kPointerFieldsBeginOffset),
- HeapObject::RawField(map,
- Map::kPointerFieldsEndOffset));
+ Object** start_slot = HeapObject::RawField(map,
+ Map::kPointerFieldsBeginOffset);
+
+ Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
+
+ StaticMarkingVisitor::VisitPointers(start_slot, end_slot);
}
@@ -447,10 +554,11 @@ void MarkCompactCollector::MarkDescriptorArray(
ASSERT(contents->IsFixedArray());
ASSERT(contents->length() >= 2);
SetMark(contents);
- // Contents contains (value, details) pairs. If the details say
- // that the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
- // or NULL_DESCRIPTOR, we don't mark the value as live. Only for
- // type MAP_TRANSITION is the value a Object* (a Map*).
+ // Contents contains (value, details) pairs. If the details say that
+ // the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, or
+ // NULL_DESCRIPTOR, we don't mark the value as live. Only for
+ // MAP_TRANSITION and CONSTANT_TRANSITION is the value an Object* (a
+ // Map*).
for (int i = 0; i < contents->length(); i += 2) {
// If the pair (value, details) at index i, i+1 is not
// a transition or null descriptor, mark the value.
@@ -529,7 +637,7 @@ void MarkCompactCollector::MarkSymbolTable() {
// Explicitly mark the prefix.
MarkingVisitor marker;
symbol_table->IteratePrefix(&marker);
- ProcessMarkingStack(&marker);
+ ProcessMarkingStack();
}
@@ -544,7 +652,7 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// There may be overflowed objects in the heap. Visit them now.
while (marking_stack.overflowed()) {
RefillMarkingStack();
- EmptyMarkingStack(visitor->stack_visitor());
+ EmptyMarkingStack();
}
}
@@ -587,7 +695,7 @@ void MarkCompactCollector::MarkObjectGroups() {
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
-void MarkCompactCollector::EmptyMarkingStack(MarkingVisitor* visitor) {
+void MarkCompactCollector::EmptyMarkingStack() {
while (!marking_stack.is_empty()) {
HeapObject* object = marking_stack.Pop();
ASSERT(object->IsHeapObject());
@@ -601,8 +709,8 @@ void MarkCompactCollector::EmptyMarkingStack(MarkingVisitor* visitor) {
map_word.ClearMark();
Map* map = map_word.ToMap();
MarkObject(map);
- object->IterateBody(map->instance_type(), object->SizeFromMap(map),
- visitor);
+
+ StaticMarkingVisitor::IterateBody(map, object);
}
}
@@ -652,22 +760,22 @@ void MarkCompactCollector::RefillMarkingStack() {
// stack. Before: the marking stack contains zero or more heap object
// pointers. After: the marking stack is empty and there are no overflowed
// objects in the heap.
-void MarkCompactCollector::ProcessMarkingStack(MarkingVisitor* visitor) {
- EmptyMarkingStack(visitor);
+void MarkCompactCollector::ProcessMarkingStack() {
+ EmptyMarkingStack();
while (marking_stack.overflowed()) {
RefillMarkingStack();
- EmptyMarkingStack(visitor);
+ EmptyMarkingStack();
}
}
-void MarkCompactCollector::ProcessObjectGroups(MarkingVisitor* visitor) {
+void MarkCompactCollector::ProcessObjectGroups() {
bool work_to_do = true;
ASSERT(marking_stack.is_empty());
while (work_to_do) {
MarkObjectGroups();
work_to_do = !marking_stack.is_empty();
- ProcessMarkingStack(visitor);
+ ProcessMarkingStack();
}
}
@@ -692,7 +800,7 @@ void MarkCompactCollector::MarkLiveObjects() {
// objects are unmarked. Mark objects reachable from object groups
// containing at least one marked object, and continue until no new
// objects are reachable from the object groups.
- ProcessObjectGroups(root_visitor.stack_visitor());
+ ProcessObjectGroups();
// The objects reachable from the roots or object groups are marked,
// yet unreachable objects are unmarked. Mark objects reachable
@@ -705,12 +813,12 @@ void MarkCompactCollector::MarkLiveObjects() {
GlobalHandles::IterateWeakRoots(&root_visitor);
while (marking_stack.overflowed()) {
RefillMarkingStack();
- EmptyMarkingStack(root_visitor.stack_visitor());
+ EmptyMarkingStack();
}
// Repeat the object groups to mark unmarked groups reachable from the
// weak roots.
- ProcessObjectGroups(root_visitor.stack_visitor());
+ ProcessObjectGroups();
// Prune the symbol table removing all symbols only pointed to by the
// symbol table. Cannot use symbol_table() here because the symbol
@@ -1091,16 +1199,35 @@ static void MigrateObject(Address dst,
}
+class StaticPointersToNewGenUpdatingVisitor : public
+ StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
+ public:
+ static inline void VisitPointer(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+
+ HeapObject* obj = HeapObject::cast(*p);
+ Address old_addr = obj->address();
+
+ if (Heap::new_space()->Contains(obj)) {
+ ASSERT(Heap::InFromSpace(*p));
+ *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
+ }
+ }
+};
+
+
// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
public:
void VisitPointer(Object** p) {
- UpdatePointer(p);
+ StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
}
void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) UpdatePointer(p);
+ for (Object** p = start; p < end; p++) {
+ StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
+ }
}
void VisitCodeTarget(RelocInfo* rinfo) {
@@ -1119,19 +1246,6 @@ class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
VisitPointer(&target);
rinfo->set_call_address(Code::cast(target)->instruction_start());
}
-
- private:
- void UpdatePointer(Object** p) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
- Address old_addr = obj->address();
-
- if (Heap::new_space()->Contains(obj)) {
- ASSERT(Heap::InFromSpace(*p));
- *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
- }
- }
};
@@ -1248,15 +1362,12 @@ static void SweepNewSpace(NewSpace* space) {
PointersToNewGenUpdatingVisitor updating_visitor;
// Update pointers in to space.
- HeapObject* object;
- for (Address current = space->bottom();
- current < space->top();
- current += object->Size()) {
- object = HeapObject::FromAddress(current);
-
- object->IterateBody(object->map()->instance_type(),
- object->Size(),
- &updating_visitor);
+ Address current = space->bottom();
+ while (current < space->top()) {
+ HeapObject* object = HeapObject::FromAddress(current);
+ current +=
+ StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
+ object);
}
// Update roots.
@@ -1758,7 +1869,9 @@ void MarkCompactCollector::SweepSpaces() {
SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
- SweepNewSpace(Heap::new_space());
+ { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
+ SweepNewSpace(Heap::new_space());
+ }
SweepSpace(Heap::map_space(), &DeallocateMapBlock);
Heap::IterateDirtyRegions(Heap::map_space(),
@@ -2327,4 +2440,11 @@ void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#endif
}
+
+void MarkCompactCollector::Initialize() {
+ StaticPointersToNewGenUpdatingVisitor::Initialize();
+ StaticMarkingVisitor::Initialize();
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index 1d289a759..ad635867c 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -86,6 +86,9 @@ class MarkCompactCollector: public AllStatic {
force_compaction_ = value;
}
+
+ static void Initialize();
+
// Prepares for GC by resetting relocation info in old and map spaces and
// choosing spaces to compact.
static void Prepare(GCTracer* tracer);
@@ -171,6 +174,7 @@ class MarkCompactCollector: public AllStatic {
friend class RootMarkingVisitor;
friend class MarkingVisitor;
+ friend class StaticMarkingVisitor;
// Marking operations for objects reachable from roots.
static void MarkLiveObjects();
@@ -214,17 +218,17 @@ class MarkCompactCollector: public AllStatic {
// Mark all objects in an object group with at least one marked
// object, then all objects reachable from marked objects in object
// groups, and repeat.
- static void ProcessObjectGroups(MarkingVisitor* visitor);
+ static void ProcessObjectGroups();
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap.
- static void ProcessMarkingStack(MarkingVisitor* visitor);
+ static void ProcessMarkingStack();
// Mark objects reachable (transitively) from objects in the marking
// stack. This function empties the marking stack, but may leave
// overflowed objects in the heap, in which case the marking stack's
// overflow flag will be set.
- static void EmptyMarkingStack(MarkingVisitor* visitor);
+ static void EmptyMarkingStack();
// Refill the marking stack with overflowed objects from the heap. This
// function either leaves the marking stack full or clears the overflow
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index b0f8aa16e..0375e8a17 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -707,14 +707,20 @@ CallSite.prototype.getMethodName = function () {
// See if we can find a unique property on the receiver that holds
// this function.
var ownName = this.fun.name;
- if (ownName && this.receiver && this.receiver[ownName] === this.fun)
+ if (ownName && this.receiver &&
+ (ObjectLookupGetter.call(this.receiver, ownName) === this.fun ||
+ ObjectLookupSetter.call(this.receiver, ownName) === this.fun ||
+ this.receiver[ownName] === this.fun)) {
// To handle DontEnum properties we guess that the method has
// the same name as the function.
return ownName;
+ }
var name = null;
for (var prop in this.receiver) {
- if (this.receiver[prop] === this.fun) {
- // If we find more than one match bail out to avoid confusion
+    if (this.receiver.__lookupGetter__(prop) === this.fun ||
+        this.receiver.__lookupSetter__(prop) === this.fun ||
+        (!this.receiver.__lookupGetter__(prop) &&
+         this.receiver[prop] === this.fun)) {
+      // If we find more than one match, bail out to avoid confusion.
if (name)
return null;
name = prop;
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index 47961fac7..b8ae68e39 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -114,15 +114,10 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on mips");
}
-#undef __
+const bool Debug::kFrameDropperSupported = false;
-Object** Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
- Handle<Code> code) {
- UNREACHABLE();
- return NULL;
-}
-const int Debug::kFrameDropperFrameSize = -1;
+#undef __
#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 886b9e4fa..57bed6a04 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -606,7 +606,7 @@ void Simulator::set_fpu_register(int fpureg, int32_t value) {
void Simulator::set_fpu_register_double(int fpureg, double value) {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- *v8i::BitCast<double*, int32_t*>(&FPUregisters_[fpureg]) = value;
+ *v8i::BitCast<double*>(&FPUregisters_[fpureg]) = value;
}
@@ -627,8 +627,7 @@ int32_t Simulator::get_fpu_register(int fpureg) const {
double Simulator::get_fpu_register_double(int fpureg) const {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- return *v8i::BitCast<double*, int32_t*>(
- const_cast<int32_t*>(&FPUregisters_[fpureg]));
+ return *v8i::BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
}
// Raw access to the PC register.
@@ -903,7 +902,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
case MFHC1:
fp_out = get_fpu_register_double(fs_reg);
- alu_out = *v8i::BitCast<int32_t*, double*>(&fp_out);
+ alu_out = *v8i::BitCast<int32_t*>(&fp_out);
break;
case MTC1:
case MTHC1:
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 101096d6c..5e8022e51 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -1493,6 +1493,16 @@ int DescriptorArray::Search(String* name) {
}
+int DescriptorArray::SearchWithCache(String* name) {
+ int number = DescriptorLookupCache::Lookup(this, name);
+ if (number == DescriptorLookupCache::kAbsent) {
+ number = Search(name);
+ DescriptorLookupCache::Update(this, name, number);
+ }
+ return number;
+}
+
+
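SearchWithCache above is the classic look-aside pattern: consult a small cache first, fall back to the linear Search on a miss, then record the answer. A minimal standalone sketch of the same shape (plain C++ with a one-entry cache; all names here are invented, not V8's):

#include <cstdio>
#include <cstring>

static const int kAbsent = -2;    // mirrors DescriptorLookupCache::kAbsent
static const int kNotFound = -1;

// One-entry look-aside cache; the real DescriptorLookupCache is keyed on
// (descriptor array, name), but the shape of the lookup is the same.
static const char* cached_name = 0;
static int cached_number = kAbsent;

static int Search(const char* keys[], int n, const char* name) {
  for (int i = 0; i < n; i++)
    if (std::strcmp(keys[i], name) == 0) return i;
  return kNotFound;
}

static int SearchWithCache(const char* keys[], int n, const char* name) {
  if (name == cached_name) return cached_number;  // hit: skip the scan
  int number = Search(keys, n, name);             // miss: do the real work
  cached_name = name;                             // ...and remember it
  cached_number = number;
  return number;
}

int main() {
  const char* keys[] = { "x", "y" };
  const char* name = "y";
  std::printf("%d ", SearchWithCache(keys, 2, name));   // scans: prints 1
  std::printf("%d\n", SearchWithCache(keys, 2, name));  // cache hit: prints 1
  return 0;
}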
String* DescriptorArray::GetKey(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
return String::cast(get(ToKeyIndex(descriptor_number)));
@@ -2060,21 +2070,8 @@ void ExternalFloatArray::set(int index, float value) {
ptr[index] = value;
}
-inline Scavenger Map::scavenger() {
- Scavenger callback = reinterpret_cast<Scavenger>(
- READ_INTPTR_FIELD(this, kScavengerCallbackOffset));
-
- ASSERT(callback == Heap::GetScavenger(instance_type(),
- instance_size()));
-
- return callback;
-}
-inline void Map::set_scavenger(Scavenger callback) {
- WRITE_INTPTR_FIELD(this,
- kScavengerCallbackOffset,
- reinterpret_cast<intptr_t>(callback));
-}
+INT_ACCESSORS(Map, visitor_id, kScavengerCallbackOffset)
int Map::instance_size() {
return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
@@ -2099,7 +2096,7 @@ int HeapObject::SizeFromMap(Map* map) {
(kStringTag | kConsStringTag) ||
instance_type == JS_ARRAY_TYPE) return map->instance_size();
if (instance_type == FIXED_ARRAY_TYPE) {
- return reinterpret_cast<FixedArray*>(this)->FixedArraySize();
+ return FixedArray::BodyDescriptor::SizeOf(map, this);
}
if (instance_type == BYTE_ARRAY_TYPE) {
return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
@@ -2661,8 +2658,7 @@ void SharedFunctionInfo::set_scope_info(SerializedScopeInfo* value,
bool SharedFunctionInfo::is_compiled() {
- // TODO(1242782): Create a code kind for uncompiled code.
- return code()->kind() != Code::STUB;
+ return code() != Builtins::builtin(Builtins::LazyCompile);
}
@@ -2694,12 +2690,14 @@ bool JSFunction::IsBuiltin() {
Code* JSFunction::code() {
- return shared()->code();
+ return Code::cast(READ_FIELD(this, kCodeOffset));
}
void JSFunction::set_code(Code* value) {
- shared()->set_code(value);
+ // Skip the write barrier because code is never in new space.
+ ASSERT(!Heap::InNewSpace(value));
+ WRITE_FIELD(this, kCodeOffset, value);
}
@@ -2771,7 +2769,7 @@ bool JSFunction::should_have_prototype() {
bool JSFunction::is_compiled() {
- return shared()->is_compiled();
+ return code() != Builtins::builtin(Builtins::LazyCompile);
}
@@ -2818,12 +2816,6 @@ void Proxy::set_proxy(Address value) {
}
-void Proxy::ProxyIterateBody(ObjectVisitor* visitor) {
- visitor->VisitExternalReference(
- reinterpret_cast<Address *>(FIELD_ADDR(this, kProxyOffset)));
-}
-
-
ACCESSORS(JSValue, value, Object, kValueOffset)
@@ -3307,6 +3299,74 @@ Object* FixedArray::Copy() {
}
+int JSObject::BodyDescriptor::SizeOf(Map* map, HeapObject* object) {
+ return map->instance_size();
+}
+
+
+void Proxy::ProxyIterateBody(ObjectVisitor* v) {
+ v->VisitExternalReference(
+ reinterpret_cast<Address *>(FIELD_ADDR(this, kProxyOffset)));
+}
+
+
+template<typename StaticVisitor>
+void Proxy::ProxyIterateBody() {
+ StaticVisitor::VisitExternalReference(
+ reinterpret_cast<Address *>(FIELD_ADDR(this, kProxyOffset)));
+}
+
+
+void ExternalAsciiString::ExternalAsciiStringIterateBody(ObjectVisitor* v) {
+ typedef v8::String::ExternalAsciiStringResource Resource;
+ v->VisitExternalAsciiString(
+ reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
+}
+
+
+template<typename StaticVisitor>
+void ExternalAsciiString::ExternalAsciiStringIterateBody() {
+ typedef v8::String::ExternalAsciiStringResource Resource;
+ StaticVisitor::VisitExternalAsciiString(
+ reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
+}
+
+
+void ExternalTwoByteString::ExternalTwoByteStringIterateBody(ObjectVisitor* v) {
+ typedef v8::String::ExternalStringResource Resource;
+ v->VisitExternalTwoByteString(
+ reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
+}
+
+
+template<typename StaticVisitor>
+void ExternalTwoByteString::ExternalTwoByteStringIterateBody() {
+ typedef v8::String::ExternalStringResource Resource;
+ StaticVisitor::VisitExternalTwoByteString(
+ reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
+}
+
+#define SLOT_ADDR(obj, offset) \
+ reinterpret_cast<Object**>((obj)->address() + offset)
+
+template<int start_offset, int end_offset, int size>
+void FixedBodyDescriptor<start_offset, end_offset, size>::IterateBody(
+ HeapObject* obj,
+ ObjectVisitor* v) {
+ v->VisitPointers(SLOT_ADDR(obj, start_offset), SLOT_ADDR(obj, end_offset));
+}
+
+
+template<int start_offset>
+void FlexibleBodyDescriptor<start_offset>::IterateBody(HeapObject* obj,
+ int object_size,
+ ObjectVisitor* v) {
+ v->VisitPointers(SLOT_ADDR(obj, start_offset), SLOT_ADDR(obj, object_size));
+}
+
+#undef SLOT_ADDR
+
+
#undef CAST_ACCESSOR
#undef INT_ACCESSORS
#undef SMI_ACCESSORS
diff --git a/deps/v8/src/objects-visiting.cc b/deps/v8/src/objects-visiting.cc
new file mode 100644
index 000000000..293c9bf8d
--- /dev/null
+++ b/deps/v8/src/objects-visiting.cc
@@ -0,0 +1,139 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ic-inl.h"
+#include "objects-visiting.h"
+
+namespace v8 {
+namespace internal {
+
+
+static inline bool IsShortcutCandidate(int type) {
+ return ((type & kShortcutTypeMask) == kShortcutTypeTag);
+}
+
+
+StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
+ int instance_type,
+ int instance_size) {
+ if (instance_type < FIRST_NONSTRING_TYPE) {
+ switch (instance_type & kStringRepresentationMask) {
+ case kSeqStringTag:
+ if ((instance_type & kStringEncodingMask) == kAsciiStringTag) {
+ return kVisitSeqAsciiString;
+ } else {
+ return kVisitSeqTwoByteString;
+ }
+
+ case kConsStringTag:
+ if (IsShortcutCandidate(instance_type)) {
+ return kVisitShortcutCandidate;
+ } else {
+ return kVisitConsString;
+ }
+
+ case kExternalStringTag:
+ return GetVisitorIdForSize(kVisitDataObject,
+ kVisitDataObjectGeneric,
+ ExternalString::kSize);
+ }
+ UNREACHABLE();
+ }
+
+ switch (instance_type) {
+ case BYTE_ARRAY_TYPE:
+ return kVisitByteArray;
+
+ case FIXED_ARRAY_TYPE:
+ return kVisitFixedArray;
+
+ case ODDBALL_TYPE:
+ return kVisitOddball;
+
+ case MAP_TYPE:
+ return kVisitMap;
+
+ case CODE_TYPE:
+ return kVisitCode;
+
+ case JS_GLOBAL_PROPERTY_CELL_TYPE:
+ return kVisitPropertyCell;
+
+ case SHARED_FUNCTION_INFO_TYPE:
+ return kVisitSharedFunctionInfo;
+
+ case PROXY_TYPE:
+ return GetVisitorIdForSize(kVisitDataObject,
+ kVisitDataObjectGeneric,
+ Proxy::kSize);
+
+ case FILLER_TYPE:
+ return kVisitDataObjectGeneric;
+
+ case JS_OBJECT_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_VALUE_TYPE:
+ case JS_ARRAY_TYPE:
+ case JS_REGEXP_TYPE:
+ case JS_FUNCTION_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_BUILTINS_OBJECT_TYPE:
+ return GetVisitorIdForSize(kVisitJSObject,
+ kVisitJSObjectGeneric,
+ instance_size);
+
+ case HEAP_NUMBER_TYPE:
+ case PIXEL_ARRAY_TYPE:
+ case EXTERNAL_BYTE_ARRAY_TYPE:
+ case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+ case EXTERNAL_SHORT_ARRAY_TYPE:
+ case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+ case EXTERNAL_INT_ARRAY_TYPE:
+ case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+ case EXTERNAL_FLOAT_ARRAY_TYPE:
+ return GetVisitorIdForSize(kVisitDataObject,
+ kVisitDataObjectGeneric,
+ instance_size);
+
+#define MAKE_STRUCT_CASE(NAME, Name, name) \
+ case NAME##_TYPE:
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ return GetVisitorIdForSize(kVisitStruct,
+ kVisitStructGeneric,
+ instance_size);
+
+ default:
+ UNREACHABLE();
+ return kVisitorIdCount;
+ }
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h
new file mode 100644
index 000000000..6280bac4e
--- /dev/null
+++ b/deps/v8/src/objects-visiting.h
@@ -0,0 +1,382 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_OBJECTS_ITERATION_H_
+#define V8_OBJECTS_ITERATION_H_
+
+// This file provides base classes and auxiliary methods for defining
+// static object visitors used during GC.
+// Visiting a HeapObject body with a normal ObjectVisitor requires performing
+// two switches on the object's instance type to determine object size and
+// layout, plus one or more virtual method calls on the visitor itself.
+// A static visitor is different: it provides a dispatch table containing
+// pointers to specialized visit functions. Each map has a visitor_id field
+// which holds the index of the specialized visitor to use.
+
+namespace v8 {
+namespace internal {
+
+
+// Base class for all static visitors.
+class StaticVisitorBase : public AllStatic {
+ public:
+ enum VisitorId {
+ kVisitSeqAsciiString = 0,
+ kVisitSeqTwoByteString,
+ kVisitShortcutCandidate,
+ kVisitByteArray,
+ kVisitFixedArray,
+
+    // For data objects, JS objects, and structs we provide visitors
+    // specialized by object size in words, along with a generic visitor
+    // that can visit an object of any size.
+    // The ids of the specialized visitors are declared in a linear order
+    // (without holes), starting from the id of the visitor specialized
+    // for 2-word objects (the base visitor id) and ending with the id of
+    // the generic visitor.
+    // GetVisitorIdForSize depends on this ordering to calculate the id of
+    // the specialized visitor from a given instance size, the base visitor
+    // id, and the generic visitor's id.
+
+ kVisitDataObject,
+ kVisitDataObject2 = kVisitDataObject,
+ kVisitDataObject3,
+ kVisitDataObject4,
+ kVisitDataObject5,
+ kVisitDataObject6,
+ kVisitDataObject7,
+ kVisitDataObject8,
+ kVisitDataObject9,
+ kVisitDataObjectGeneric,
+
+ kVisitJSObject,
+ kVisitJSObject2 = kVisitJSObject,
+ kVisitJSObject3,
+ kVisitJSObject4,
+ kVisitJSObject5,
+ kVisitJSObject6,
+ kVisitJSObject7,
+ kVisitJSObject8,
+ kVisitJSObject9,
+ kVisitJSObjectGeneric,
+
+ kVisitStruct,
+ kVisitStruct2 = kVisitStruct,
+ kVisitStruct3,
+ kVisitStruct4,
+ kVisitStruct5,
+ kVisitStruct6,
+ kVisitStruct7,
+ kVisitStruct8,
+ kVisitStruct9,
+ kVisitStructGeneric,
+
+ kVisitConsString,
+ kVisitOddball,
+ kVisitCode,
+ kVisitMap,
+ kVisitPropertyCell,
+ kVisitSharedFunctionInfo,
+
+ kVisitorIdCount,
+ kMinObjectSizeInWords = 2
+ };
+
+  // Determine which specialized visitor should be used for a given
+  // instance type and instance size.
+ static VisitorId GetVisitorId(int instance_type, int instance_size);
+
+ static VisitorId GetVisitorId(Map* map) {
+ return GetVisitorId(map->instance_type(), map->instance_size());
+ }
+
+  // For visitors that allow specialization by size, calculate the
+  // VisitorId based on the object size, the base visitor id, and the
+  // generic visitor id.
+ static VisitorId GetVisitorIdForSize(VisitorId base,
+ VisitorId generic,
+ int object_size) {
+ ASSERT((base == kVisitDataObject) ||
+ (base == kVisitStruct) ||
+ (base == kVisitJSObject));
+ ASSERT(IsAligned(object_size, kPointerSize));
+ ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
+ ASSERT(object_size < Page::kMaxHeapObjectSize);
+
+ const VisitorId specialization = static_cast<VisitorId>(
+ base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
+
+ return Min(specialization, generic);
+ }
+};
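The id arithmetic in GetVisitorIdForSize is easy to check by hand: the specialized id is the base id plus the object size in words minus kMinObjectSizeInWords, clamped to the generic id. A standalone sketch of that calculation (plain C++, assuming 32-bit pointers; the base and generic constants are stand-ins, not the real enum values):

#include <cassert>

static const int kPointerSizeLog2 = 2;       // assume 32-bit pointers
static const int kMinObjectSizeInWords = 2;

// Mirrors StaticVisitorBase::GetVisitorIdForSize.
static int GetVisitorIdForSize(int base, int generic, int object_size) {
  int specialization =
      base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords;
  return specialization < generic ? specialization : generic;
}

int main() {
  const int base = 0, generic = 8;  // stand-ins for kVisitJSObject(2..9) + generic
  assert(GetVisitorIdForSize(base, generic, 12) == 1);  // 3 words -> base + 1
  assert(GetVisitorIdForSize(base, generic, 48) == 8);  // 12 words -> generic
  return 0;
}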
+
+
+template<typename Callback>
+class VisitorDispatchTable {
+ public:
+ inline Callback GetVisitor(Map* map) {
+ return callbacks_[map->visitor_id()];
+ }
+
+ void Register(StaticVisitorBase::VisitorId id, Callback callback) {
+ ASSERT((0 <= id) && (id < StaticVisitorBase::kVisitorIdCount));
+ callbacks_[id] = callback;
+ }
+
+ template<typename Visitor,
+ StaticVisitorBase::VisitorId base,
+ StaticVisitorBase::VisitorId generic,
+ int object_size_in_words>
+ void RegisterSpecialization() {
+ static const int size = object_size_in_words * kPointerSize;
+ Register(StaticVisitorBase::GetVisitorIdForSize(base, generic, size),
+ &Visitor::template VisitSpecialized<size>);
+ }
+
+
+ template<typename Visitor,
+ StaticVisitorBase::VisitorId base,
+ StaticVisitorBase::VisitorId generic>
+ void RegisterSpecializations() {
+ STATIC_ASSERT(
+ (generic - base + StaticVisitorBase::kMinObjectSizeInWords) == 10);
+ RegisterSpecialization<Visitor, base, generic, 2>();
+ RegisterSpecialization<Visitor, base, generic, 3>();
+ RegisterSpecialization<Visitor, base, generic, 4>();
+ RegisterSpecialization<Visitor, base, generic, 5>();
+ RegisterSpecialization<Visitor, base, generic, 6>();
+ RegisterSpecialization<Visitor, base, generic, 7>();
+ RegisterSpecialization<Visitor, base, generic, 8>();
+ RegisterSpecialization<Visitor, base, generic, 9>();
+ Register(generic, &Visitor::Visit);
+ }
+
+ private:
+ Callback callbacks_[StaticVisitorBase::kVisitorIdCount];
+};
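Dispatch through VisitorDispatchTable is just an array index and an indirect call: one load of map->visitor_id(), one load of the table slot, one jump. A self-contained miniature of the pattern (Thing stands in for Map; nothing here is V8's actual API):

#include <cstdio>

// "Thing" and its visitor_id field are invented stand-ins for Map and
// Map::visitor_id(); the real table is indexed the same way.
struct Thing { int visitor_id; };

typedef int (*Callback)(Thing* t);

static int VisitSmall(Thing*) { std::puts("small visitor"); return 0; }
static int VisitGeneric(Thing*) { std::puts("generic visitor"); return 0; }

static Callback table[2];  // one slot per visitor id, as in callbacks_[]

int main() {
  table[0] = &VisitSmall;          // mirrors table_.Register(id, callback)
  table[1] = &VisitGeneric;
  Thing t = { 1 };
  return table[t.visitor_id](&t);  // mirrors GetVisitor(map)(map, obj)
}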
+
+
+template<typename StaticVisitor>
+class BodyVisitorBase : public AllStatic {
+ public:
+ static inline void IteratePointers(HeapObject* object,
+ int start_offset,
+ int end_offset) {
+ Object** start_slot = reinterpret_cast<Object**>(object->address() +
+ start_offset);
+ Object** end_slot = reinterpret_cast<Object**>(object->address() +
+ end_offset);
+ StaticVisitor::VisitPointers(start_slot, end_slot);
+ }
+};
+
+
+template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
+class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
+ public:
+ static inline ReturnType Visit(Map* map, HeapObject* object) {
+ int object_size = BodyDescriptor::SizeOf(map, object);
+ IteratePointers(object, BodyDescriptor::kStartOffset, object_size);
+ return static_cast<ReturnType>(object_size);
+ }
+
+ template<int object_size>
+ static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
+ IteratePointers(object, BodyDescriptor::kStartOffset, object_size);
+ return static_cast<ReturnType>(object_size);
+ }
+};
+
+
+template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
+class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
+ public:
+ static inline ReturnType Visit(Map* map, HeapObject* object) {
+ IteratePointers(object,
+ BodyDescriptor::kStartOffset,
+ BodyDescriptor::kEndOffset);
+ return static_cast<ReturnType>(BodyDescriptor::kSize);
+ }
+};
+
+
+// Base class for visitors used for a linear new space iteration.
+// IterateBody returns the size of the visited object.
+// Certain types of objects (e.g. Code objects) are not handled by the
+// dispatch table of this visitor because they cannot appear in the new
+// space.
+//
+// This class is intended to be used in the following way:
+//
+// class SomeVisitor : public StaticNewSpaceVisitor<SomeVisitor> {
+// ...
+// }
+//
+// This is an example of the curiously recurring template pattern
+// (see http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern).
+// We use CRTP to guarantee aggressive compile-time optimizations (i.e.
+// inlining and specialization of StaticVisitor::VisitPointers methods).
+template<typename StaticVisitor>
+class StaticNewSpaceVisitor : public StaticVisitorBase {
+ public:
+ static void Initialize() {
+ table_.Register(kVisitShortcutCandidate,
+ &FixedBodyVisitor<StaticVisitor,
+ ConsString::BodyDescriptor,
+ int>::Visit);
+
+ table_.Register(kVisitConsString,
+ &FixedBodyVisitor<StaticVisitor,
+ ConsString::BodyDescriptor,
+ int>::Visit);
+
+ table_.Register(kVisitFixedArray,
+ &FlexibleBodyVisitor<StaticVisitor,
+ FixedArray::BodyDescriptor,
+ int>::Visit);
+
+ table_.Register(kVisitByteArray, &VisitByteArray);
+
+ table_.Register(kVisitSharedFunctionInfo,
+ &FixedBodyVisitor<StaticVisitor,
+ SharedFunctionInfo::BodyDescriptor,
+ int>::Visit);
+
+ table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
+
+ table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
+
+ table_.RegisterSpecializations<DataObjectVisitor,
+ kVisitDataObject,
+ kVisitDataObjectGeneric>();
+ table_.RegisterSpecializations<JSObjectVisitor,
+ kVisitJSObject,
+ kVisitJSObjectGeneric>();
+ table_.RegisterSpecializations<StructVisitor,
+ kVisitStruct,
+ kVisitStructGeneric>();
+ }
+
+ static inline int IterateBody(Map* map, HeapObject* obj) {
+ return table_.GetVisitor(map)(map, obj);
+ }
+
+ static inline void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(p);
+ }
+
+ private:
+ static inline int VisitByteArray(Map* map, HeapObject* object) {
+ return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
+ }
+
+ static inline int VisitSeqAsciiString(Map* map, HeapObject* object) {
+ return SeqAsciiString::cast(object)->
+ SeqAsciiStringSize(map->instance_type());
+ }
+
+ static inline int VisitSeqTwoByteString(Map* map, HeapObject* object) {
+ return SeqTwoByteString::cast(object)->
+ SeqTwoByteStringSize(map->instance_type());
+ }
+
+ class DataObjectVisitor {
+ public:
+ template<int object_size>
+ static inline int VisitSpecialized(Map* map, HeapObject* object) {
+ return object_size;
+ }
+
+ static inline int Visit(Map* map, HeapObject* object) {
+ return map->instance_size();
+ }
+ };
+
+ typedef FlexibleBodyVisitor<StaticVisitor,
+ StructBodyDescriptor,
+ int> StructVisitor;
+
+ typedef FlexibleBodyVisitor<StaticVisitor,
+ JSObject::BodyDescriptor,
+ int> JSObjectVisitor;
+
+ typedef int (*Callback)(Map* map, HeapObject* object);
+
+ static VisitorDispatchTable<Callback> table_;
+};
+
+
+template<typename StaticVisitor>
+VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback>
+ StaticNewSpaceVisitor<StaticVisitor>::table_;
+
+
+void Code::CodeIterateBody(ObjectVisitor* v) {
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+ RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+ // Use the relocation info pointer before it is visited by
+ // the heap compaction in the next statement.
+ RelocIterator it(this, mode_mask);
+
+ IteratePointers(v,
+ kRelocationInfoOffset,
+ kRelocationInfoOffset + kPointerSize);
+
+ for (; !it.done(); it.next()) {
+ it.rinfo()->Visit(v);
+ }
+}
+
+
+template<typename StaticVisitor>
+void Code::CodeIterateBody() {
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+ RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+ // Use the relocation info pointer before it is visited by
+ // the heap compaction in the next statement.
+ RelocIterator it(this, mode_mask);
+
+ StaticVisitor::VisitPointer(
+ reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
+
+ for (; !it.done(); it.next()) {
+ it.rinfo()->template Visit<StaticVisitor>();
+ }
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_OBJECTS_ITERATION_H_
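The CRTP arrangement the header describes can be reproduced in isolation: the base template calls VisitPointer through its template parameter, so every call is statically bound and inlinable. A hedged standalone sketch (plain C++, invented names):

#include <cstdio>

// VisitorBase calls VisitPointer through its template parameter, so the
// call is resolved at compile time; no virtual dispatch is involved.
template<typename StaticVisitor>
struct VisitorBase {
  static void VisitPointers(int** start, int** end) {
    for (int** p = start; p < end; p++) StaticVisitor::VisitPointer(p);
  }
};

struct CountingVisitor : public VisitorBase<CountingVisitor> {
  static int count;
  static void VisitPointer(int**) { count++; }
};

int CountingVisitor::count = 0;

int main() {
  int* slots[4] = { 0, 0, 0, 0 };
  CountingVisitor::VisitPointers(slots, slots + 4);
  std::printf("visited %d slots\n", CountingVisitor::count);  // prints 4
  return 0;
}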
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 4e20959a7..5687a3a53 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -33,6 +33,7 @@
#include "debug.h"
#include "execution.h"
#include "objects-inl.h"
+#include "objects-visiting.h"
#include "macro-assembler.h"
#include "scanner.h"
#include "scopeinfo.h"
@@ -1042,7 +1043,7 @@ int HeapObject::SlowSizeFromMap(Map* map) {
switch (instance_type) {
case FIXED_ARRAY_TYPE:
- return reinterpret_cast<FixedArray*>(this)->FixedArraySize();
+ return FixedArray::BodyDescriptor::SizeOf(map, this);
case BYTE_ARRAY_TYPE:
return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
case CODE_TYPE:
@@ -1073,7 +1074,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case kSeqStringTag:
break;
case kConsStringTag:
- reinterpret_cast<ConsString*>(this)->ConsStringIterateBody(v);
+ ConsString::BodyDescriptor::IterateBody(this, v);
break;
case kExternalStringTag:
if ((type & kStringEncodingMask) == kAsciiStringTag) {
@@ -1090,7 +1091,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
switch (type) {
case FIXED_ARRAY_TYPE:
- reinterpret_cast<FixedArray*>(this)->FixedArrayIterateBody(v);
+ FixedArray::BodyDescriptor::IterateBody(this, object_size, v);
break;
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -1101,23 +1102,22 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_BUILTINS_OBJECT_TYPE:
- reinterpret_cast<JSObject*>(this)->JSObjectIterateBody(object_size, v);
+ JSObject::BodyDescriptor::IterateBody(this, object_size, v);
break;
case ODDBALL_TYPE:
- reinterpret_cast<Oddball*>(this)->OddballIterateBody(v);
+ Oddball::BodyDescriptor::IterateBody(this, v);
break;
case PROXY_TYPE:
reinterpret_cast<Proxy*>(this)->ProxyIterateBody(v);
break;
case MAP_TYPE:
- reinterpret_cast<Map*>(this)->MapIterateBody(v);
+ Map::BodyDescriptor::IterateBody(this, v);
break;
case CODE_TYPE:
reinterpret_cast<Code*>(this)->CodeIterateBody(v);
break;
case JS_GLOBAL_PROPERTY_CELL_TYPE:
- reinterpret_cast<JSGlobalPropertyCell*>(this)
- ->JSGlobalPropertyCellIterateBody(v);
+ JSGlobalPropertyCell::BodyDescriptor::IterateBody(this, v);
break;
case HEAP_NUMBER_TYPE:
case FILLER_TYPE:
@@ -1131,16 +1131,15 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
case EXTERNAL_FLOAT_ARRAY_TYPE:
break;
- case SHARED_FUNCTION_INFO_TYPE: {
- SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(this);
- shared->SharedFunctionInfoIterateBody(v);
+ case SHARED_FUNCTION_INFO_TYPE:
+ SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
break;
- }
+
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE:
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
- IterateStructBody(object_size, v);
+ StructBodyDescriptor::IterateBody(this, object_size, v);
break;
default:
PrintF("Unknown type: %d\n", type);
@@ -1156,11 +1155,23 @@ void HeapObject::IterateStructBody(int object_size, ObjectVisitor* v) {
Object* HeapNumber::HeapNumberToBoolean() {
// NaN, +0, and -0 should return the false object
- switch (fpclassify(value())) {
- case FP_NAN: // fall through
- case FP_ZERO: return Heap::false_value();
- default: return Heap::true_value();
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ union IeeeDoubleLittleEndianArchType u;
+#elif __BYTE_ORDER == __BIG_ENDIAN
+ union IeeeDoubleBigEndianArchType u;
+#endif
+ u.d = value();
+ if (u.bits.exp == 2047) {
+ // Detect NaN for IEEE double precision floating point.
+ if ((u.bits.man_low | u.bits.man_high) != 0)
+ return Heap::false_value();
}
+ if (u.bits.exp == 0) {
+ // Detect +0, and -0 for IEEE double precision floating point.
+ if ((u.bits.man_low | u.bits.man_high) == 0)
+ return Heap::false_value();
+ }
+ return Heap::true_value();
}
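The rewritten HeapNumberToBoolean avoids fpclassify by inspecting the raw IEEE-754 bits: an all-ones exponent with a nonzero mantissa is NaN, a zero exponent with a zero mantissa is +0 or -0, and both map to false. A standalone sketch of the same bit test (standard C++ only; DoubleToBoolean is an invented name):

#include <cassert>
#include <cstring>
#include <limits>
#include <stdint.h>

// Classify a double by its raw IEEE-754 bits, as the new
// HeapNumberToBoolean does: NaN and +/-0 are falsy, all else truthy.
static bool DoubleToBoolean(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));       // portable type pun
  uint64_t exp = (bits >> 52) & 0x7ff;        // 11 exponent bits
  uint64_t man = bits & 0xfffffffffffffULL;   // 52 mantissa bits
  if (exp == 2047 && man != 0) return false;  // NaN
  if (exp == 0 && man == 0) return false;     // +0 or -0
  return true;
}

int main() {
  assert(!DoubleToBoolean(0.0));
  assert(!DoubleToBoolean(-0.0));
  assert(!DoubleToBoolean(std::numeric_limits<double>::quiet_NaN()));
  assert(DoubleToBoolean(1.5));
  // Infinity has exp == 2047 but a zero mantissa, so it stays truthy.
  assert(DoubleToBoolean(std::numeric_limits<double>::infinity()));
  return 0;
}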
@@ -1209,12 +1220,6 @@ String* JSObject::constructor_name() {
}
-void JSObject::JSObjectIterateBody(int object_size, ObjectVisitor* v) {
- // Iterate over all fields in the body. Assumes all are Object*.
- IteratePointers(v, kPropertiesOffset, object_size);
-}
-
-
Object* JSObject::AddFastPropertyUsingMap(Map* new_map,
String* name,
Object* value) {
@@ -1337,7 +1342,7 @@ Object* JSObject::AddConstantFunctionProperty(String* name,
if (attributes != NONE) {
return function;
}
- ConstTransitionDescriptor mark(name);
+ ConstTransitionDescriptor mark(name, Map::cast(new_map));
new_descriptors =
old_map->instance_descriptors()->CopyInsert(&mark, KEEP_TRANSITIONS);
if (new_descriptors->IsFailure()) {
@@ -1695,11 +1700,7 @@ bool JSObject::SetElementWithCallbackSetterInPrototypes(uint32_t index,
void JSObject::LookupInDescriptor(String* name, LookupResult* result) {
DescriptorArray* descriptors = map()->instance_descriptors();
- int number = DescriptorLookupCache::Lookup(descriptors, name);
- if (number == DescriptorLookupCache::kAbsent) {
- number = descriptors->Search(name);
- DescriptorLookupCache::Update(descriptors, name, number);
- }
+ int number = descriptors->SearchWithCache(name);
if (number != DescriptorArray::kNotFound) {
result->DescriptorResult(this, descriptors->GetDetails(number), number);
} else {
@@ -1817,8 +1818,10 @@ Object* JSObject::SetPropertyWithFailedAccessCheck(LookupResult* result,
}
}
+ HandleScope scope;
+ Handle<Object> value_handle(value);
Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return value;
+ return *value_handle;
}
@@ -1896,10 +1899,25 @@ Object* JSObject::SetProperty(LookupResult* result,
result->holder());
case INTERCEPTOR:
return SetPropertyWithInterceptor(name, value, attributes);
- case CONSTANT_TRANSITION:
- // Replace with a MAP_TRANSITION to a new map with a FIELD, even
- // if the value is a function.
+ case CONSTANT_TRANSITION: {
+ // If the same constant function is being added we can simply
+ // transition to the target map.
+ Map* target_map = result->GetTransitionMap();
+ DescriptorArray* target_descriptors = target_map->instance_descriptors();
+ int number = target_descriptors->SearchWithCache(name);
+ ASSERT(number != DescriptorArray::kNotFound);
+ ASSERT(target_descriptors->GetType(number) == CONSTANT_FUNCTION);
+ JSFunction* function =
+ JSFunction::cast(target_descriptors->GetValue(number));
+ ASSERT(!Heap::InNewSpace(function));
+ if (value == function) {
+ set_map(target_map);
+ return value;
+ }
+ // Otherwise, replace with a MAP_TRANSITION to a new map with a
+ // FIELD, even if the value is a constant function.
return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
+ }
case NULL_DESCRIPTOR:
return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
default:
@@ -2190,8 +2208,7 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
int new_instance_size = map()->instance_size() - instance_size_delta;
new_map->set_inobject_properties(0);
new_map->set_instance_size(new_instance_size);
- new_map->set_scavenger(Heap::GetScavenger(new_map->instance_type(),
- new_map->instance_size()));
+ new_map->set_visitor_id(StaticVisitorBase::GetVisitorId(new_map));
Heap::CreateFillerObjectAt(this->address() + new_instance_size,
instance_size_delta);
}
@@ -3407,11 +3424,6 @@ void CodeCacheHashTable::RemoveByIndex(int index) {
}
-void FixedArray::FixedArrayIterateBody(ObjectVisitor* v) {
- IteratePointers(v, kHeaderSize, kHeaderSize + length() * kPointerSize);
-}
-
-
static bool HasKey(FixedArray* array, Object* key) {
int len0 = array->length();
for (int i = 0; i < len0; i++) {
@@ -4501,16 +4513,6 @@ void ConsString::ConsStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
}
-void ConsString::ConsStringIterateBody(ObjectVisitor* v) {
- IteratePointers(v, kFirstOffset, kSecondOffset + kPointerSize);
-}
-
-
-void JSGlobalPropertyCell::JSGlobalPropertyCellIterateBody(ObjectVisitor* v) {
- IteratePointers(v, kValueOffset, kValueOffset + kPointerSize);
-}
-
-
uint16_t ConsString::ConsStringGet(int index) {
ASSERT(index >= 0 && index < this->length());
@@ -4614,24 +4616,6 @@ void String::WriteToFlat(String* src,
}
-#define FIELD_ADDR(p, offset) \
- (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
-
-void ExternalAsciiString::ExternalAsciiStringIterateBody(ObjectVisitor* v) {
- typedef v8::String::ExternalAsciiStringResource Resource;
- v->VisitExternalAsciiString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-
-void ExternalTwoByteString::ExternalTwoByteStringIterateBody(ObjectVisitor* v) {
- typedef v8::String::ExternalStringResource Resource;
- v->VisitExternalTwoByteString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-#undef FIELD_ADDR
-
template <typename IteratorA, typename IteratorB>
static inline bool CompareStringContents(IteratorA* ia, IteratorB* ib) {
// General slow case check. We know that the ia and ib iterators
@@ -4977,7 +4961,8 @@ void String::PrintOn(FILE* file) {
void Map::CreateBackPointers() {
DescriptorArray* descriptors = instance_descriptors();
for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
- if (descriptors->GetType(i) == MAP_TRANSITION) {
+ if (descriptors->GetType(i) == MAP_TRANSITION ||
+ descriptors->GetType(i) == CONSTANT_TRANSITION) {
// Get target.
Map* target = Map::cast(descriptors->GetValue(i));
#ifdef DEBUG
@@ -5018,7 +5003,8 @@ void Map::ClearNonLiveTransitions(Object* real_prototype) {
// map is not reached again by following a back pointer from a
// non-live object.
PropertyDetails details(Smi::cast(contents->get(i + 1)));
- if (details.type() == MAP_TRANSITION) {
+ if (details.type() == MAP_TRANSITION ||
+ details.type() == CONSTANT_TRANSITION) {
Map* target = reinterpret_cast<Map*>(contents->get(i));
ASSERT(target->IsHeapObject());
if (!target->IsMarked()) {
@@ -5035,12 +5021,6 @@ void Map::ClearNonLiveTransitions(Object* real_prototype) {
}
-void Map::MapIterateBody(ObjectVisitor* v) {
- // Assumes all Object* members are contiguously allocated!
- IteratePointers(v, kPointerFieldsBeginOffset, kPointerFieldsEndOffset);
-}
-
-
Object* JSFunction::SetInstancePrototype(Object* value) {
ASSERT(value->IsJSObject());
@@ -5104,12 +5084,6 @@ Context* JSFunction::GlobalContextFromLiterals(FixedArray* literals) {
}
-void Oddball::OddballIterateBody(ObjectVisitor* v) {
- // Assumes all Object* members are contiguously allocated!
- IteratePointers(v, kToStringOffset, kToNumberOffset + kPointerSize);
-}
-
-
Object* Oddball::Initialize(const char* to_string, Object* to_number) {
Object* symbol = Heap::LookupAsciiSymbol(to_string);
if (symbol->IsFailure()) return symbol;
@@ -5282,13 +5256,6 @@ void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
}
-void SharedFunctionInfo::SharedFunctionInfoIterateBody(ObjectVisitor* v) {
- IteratePointers(v,
- kNameOffset,
- kThisPropertyAssignmentsOffset + kPointerSize);
-}
-
-
void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -5310,28 +5277,6 @@ void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
}
-void Code::CodeIterateBody(ObjectVisitor* v) {
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
- RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
- // Use the relocation info pointer before it is visited by
- // the heap compaction in the next statement.
- RelocIterator it(this, mode_mask);
-
- IteratePointers(v,
- kRelocationInfoOffset,
- kRelocationInfoOffset + kPointerSize);
-
- for (; !it.done(); it.next()) {
- it.rinfo()->Visit(v);
- }
-}
-
-
void Code::Relocate(intptr_t delta) {
for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
it.rinfo()->apply(delta);
@@ -5826,13 +5771,16 @@ bool JSObject::HasElementWithInterceptor(JSObject* receiver, uint32_t index) {
v8::IndexedPropertyQuery query =
v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
LOG(ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
- v8::Handle<v8::Boolean> result;
+ v8::Handle<v8::Integer> result;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
result = query(index, info);
}
- if (!result.IsEmpty()) return result->IsTrue();
+ if (!result.IsEmpty()) {
+ ASSERT(result->IsInt32());
+    return true;  // Absence of a property is signaled by an empty handle.
+ }
} else if (!interceptor->getter()->IsUndefined()) {
v8::IndexedPropertyGetter getter =
v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
@@ -6151,8 +6099,10 @@ Object* JSObject::SetElement(uint32_t index, Object* value) {
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
!Top::MayIndexedAccess(this, index, v8::ACCESS_SET)) {
+ HandleScope scope;
+ Handle<Object> value_handle(value);
Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return value;
+ return *value_handle;
}
if (IsJSGlobalProxy()) {
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 8fa251efa..d2f6d3559 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -1106,6 +1106,51 @@ class HeapObject: public Object {
};
+#define SLOT_ADDR(obj, offset) \
+ reinterpret_cast<Object**>((obj)->address() + offset)
+
+// This class describes a body of an object of a fixed size
+// in which all pointer fields are located in the [start_offset, end_offset)
+// interval.
+template<int start_offset, int end_offset, int size>
+class FixedBodyDescriptor {
+ public:
+ static const int kStartOffset = start_offset;
+ static const int kEndOffset = end_offset;
+ static const int kSize = size;
+
+ static inline void IterateBody(HeapObject* obj, ObjectVisitor* v);
+
+ template<typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj) {
+ StaticVisitor::VisitPointers(SLOT_ADDR(obj, start_offset),
+ SLOT_ADDR(obj, end_offset));
+ }
+};
+
+
+// This class describes a body of an object of a variable size
+// in which all pointer fields are located in the [start_offset, object_size)
+// interval.
+template<int start_offset>
+class FlexibleBodyDescriptor {
+ public:
+ static const int kStartOffset = start_offset;
+
+ static inline void IterateBody(HeapObject* obj,
+ int object_size,
+ ObjectVisitor* v);
+
+ template<typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ StaticVisitor::VisitPointers(SLOT_ADDR(obj, start_offset),
+ SLOT_ADDR(obj, object_size));
+ }
+};
+
+#undef SLOT_ADDR
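A body descriptor reduces "visit this object's pointer fields" to compile-time offsets, so the GC loop becomes a plain pointer walk. A minimal standalone mirror of the fixed-size case (plain C++; Obj and PrintSlot are invented for illustration):

#include <cstddef>
#include <cstdio>

struct Obj {
  int header;    // not a pointer field
  void* first;   // the descriptor's [kStartOffset, kEndOffset) covers
  void* second;  // exactly these two slots
};

template<size_t start_offset, size_t end_offset>
struct FixedBody {
  static void IterateBody(Obj* obj, void (*visit)(void** slot)) {
    char* base = reinterpret_cast<char*>(obj);
    for (size_t off = start_offset; off < end_offset; off += sizeof(void*))
      visit(reinterpret_cast<void**>(base + off));
  }
};

static void PrintSlot(void** slot) {
  std::printf("slot at %p\n", static_cast<void*>(slot));
}

int main() {
  Obj o = { 0, 0, 0 };
  // Visits exactly the two pointer slots, like ConsString's BodyDescriptor.
  FixedBody<offsetof(Obj, first), sizeof(Obj)>::IterateBody(&o, &PrintSlot);
  return 0;
}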
+
+
// The HeapNumber class describes heap allocated numbers that cannot be
// represented in a Smi (small integer)
class HeapNumber: public HeapObject {
@@ -1522,7 +1567,6 @@ class JSObject: public HeapObject {
// Dispatched behavior.
- void JSObjectIterateBody(int object_size, ObjectVisitor* v);
void JSObjectShortPrint(StringStream* accumulator);
#ifdef DEBUG
void JSObjectPrint();
@@ -1578,6 +1622,11 @@ class JSObject: public HeapObject {
STATIC_CHECK(kHeaderSize == Internals::kJSObjectHeaderSize);
+ class BodyDescriptor : public FlexibleBodyDescriptor<kPropertiesOffset> {
+ public:
+ static inline int SizeOf(Map* map, HeapObject* object);
+ };
+
private:
Object* GetElementWithCallback(Object* receiver,
Object* structure,
@@ -1692,8 +1741,6 @@ class FixedArray: public HeapObject {
static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
// Dispatched behavior.
- int FixedArraySize() { return SizeFor(length()); }
- void FixedArrayIterateBody(ObjectVisitor* v);
#ifdef DEBUG
void FixedArrayPrint();
void FixedArrayVerify();
@@ -1711,6 +1758,13 @@ class FixedArray: public HeapObject {
// object, the prefix of this array is sorted.
void SortPairs(FixedArray* numbers, uint32_t len);
+ class BodyDescriptor : public FlexibleBodyDescriptor<kHeaderSize> {
+ public:
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return SizeFor(reinterpret_cast<FixedArray*>(object)->length());
+ }
+ };
+
protected:
// Set operation on FixedArray without using write barriers. Can
// only be used for storing old space objects or smis.
@@ -1811,6 +1865,10 @@ class DescriptorArray: public FixedArray {
// Search the instance descriptors for given name.
inline int Search(String* name);
+  // Like the above, but uses the DescriptorLookupCache and updates it
+  // when necessary.
+ inline int SearchWithCache(String* name);
+
  // Tells whether the name is present in the array.
bool Contains(String* name) { return kNotFound != Search(name); }
@@ -2426,7 +2484,9 @@ class ByteArray: public HeapObject {
static inline ByteArray* cast(Object* obj);
// Dispatched behavior.
- int ByteArraySize() { return SizeFor(length()); }
+ inline int ByteArraySize() {
+ return SizeFor(this->length());
+ }
#ifdef DEBUG
void ByteArrayPrint();
void ByteArrayVerify();
@@ -2847,7 +2907,10 @@ class Code: public HeapObject {
// Dispatched behavior.
int CodeSize() { return SizeFor(body_size()); }
- void CodeIterateBody(ObjectVisitor* v);
+ inline void CodeIterateBody(ObjectVisitor* v);
+
+ template<typename StaticVisitor>
+ inline void CodeIterateBody();
#ifdef DEBUG
void CodePrint();
void CodeVerify();
@@ -2893,7 +2956,6 @@ class Code: public HeapObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
};
-typedef void (*Scavenger)(Map* map, HeapObject** slot, HeapObject* object);
// All heap objects have a Map that describes their structure.
// A Map contains information about:
@@ -3089,18 +3151,13 @@ class Map: public HeapObject {
void ClearNonLiveTransitions(Object* real_prototype);
// Dispatched behavior.
- void MapIterateBody(ObjectVisitor* v);
#ifdef DEBUG
void MapPrint();
void MapVerify();
#endif
- inline Scavenger scavenger();
- inline void set_scavenger(Scavenger callback);
-
- inline void Scavenge(HeapObject** slot, HeapObject* obj) {
- scavenger()(this, slot, obj);
- }
+ inline int visitor_id();
+ inline void set_visitor_id(int visitor_id);
static const int kMaxPreAllocatedPropertyFields = 255;
@@ -3154,12 +3211,17 @@ class Map: public HeapObject {
static const int kIsExtensible = 0;
static const int kFunctionWithPrototype = 1;
static const int kHasFastElements = 2;
+ static const int kStringWrapperSafeForDefaultValueOf = 3;
// Layout of the default cache. It holds alternating name and code objects.
static const int kCodeCacheEntrySize = 2;
static const int kCodeCacheEntryNameOffset = 0;
static const int kCodeCacheEntryCodeOffset = 1;
+ typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
+ kPointerFieldsEndOffset,
+ kSize> BodyDescriptor;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
};
@@ -3414,7 +3476,6 @@ class SharedFunctionInfo: public HeapObject {
int CalculateInObjectProperties();
// Dispatched behavior.
- void SharedFunctionInfoIterateBody(ObjectVisitor* v);
// Set max_length to -1 for unlimited length.
void SourceCodePrint(StringStream* accumulator, int max_length);
#ifdef DEBUG
@@ -3503,6 +3564,10 @@ class SharedFunctionInfo: public HeapObject {
#endif
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
+ typedef FixedBodyDescriptor<kNameOffset,
+ kThisPropertyAssignmentsOffset + kPointerSize,
+ kSize> BodyDescriptor;
+
private:
// Bit positions in start_position_and_type.
// The source code start position is in the 30 most significant bits of
@@ -3608,7 +3673,9 @@ class JSFunction: public JSObject {
static Context* GlobalContextFromLiterals(FixedArray* literals);
// Layout descriptors.
- static const int kPrototypeOrInitialMapOffset = JSObject::kHeaderSize;
+ static const int kCodeOffset = JSObject::kHeaderSize;
+ static const int kPrototypeOrInitialMapOffset =
+ kCodeOffset + kPointerSize;
static const int kSharedFunctionInfoOffset =
kPrototypeOrInitialMapOffset + kPointerSize;
static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
@@ -4551,11 +4618,6 @@ class ConsString: public String {
// Casting.
static inline ConsString* cast(Object* obj);
- // Garbage collection support. This method is called during garbage
- // collection to iterate through the heap pointers in the body of
- // the ConsString.
- void ConsStringIterateBody(ObjectVisitor* v);
-
// Layout description.
static const int kFirstOffset = POINTER_SIZE_ALIGN(String::kSize);
static const int kSecondOffset = kFirstOffset + kPointerSize;
@@ -4572,6 +4634,9 @@ class ConsString: public String {
// Minimum length for a cons string.
static const int kMinLength = 13;
+ typedef FixedBodyDescriptor<kFirstOffset, kSecondOffset + kPointerSize, kSize>
+ BodyDescriptor;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ConsString);
};
@@ -4621,7 +4686,10 @@ class ExternalAsciiString: public ExternalString {
static inline ExternalAsciiString* cast(Object* obj);
// Garbage collection support.
- void ExternalAsciiStringIterateBody(ObjectVisitor* v);
+ inline void ExternalAsciiStringIterateBody(ObjectVisitor* v);
+
+ template<typename StaticVisitor>
+ inline void ExternalAsciiStringIterateBody();
// Support for StringInputBuffer.
const unibrow::byte* ExternalAsciiStringReadBlock(unsigned* remaining,
@@ -4658,7 +4726,11 @@ class ExternalTwoByteString: public ExternalString {
static inline ExternalTwoByteString* cast(Object* obj);
// Garbage collection support.
- void ExternalTwoByteStringIterateBody(ObjectVisitor* v);
+ inline void ExternalTwoByteStringIterateBody(ObjectVisitor* v);
+
+ template<typename StaticVisitor>
+ inline void ExternalTwoByteStringIterateBody();
+
// Support for StringInputBuffer.
void ExternalTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
@@ -4769,7 +4841,6 @@ class Oddball: public HeapObject {
static inline Oddball* cast(Object* obj);
// Dispatched behavior.
- void OddballIterateBody(ObjectVisitor* v);
#ifdef DEBUG
void OddballVerify();
#endif
@@ -4782,6 +4853,10 @@ class Oddball: public HeapObject {
static const int kToNumberOffset = kToStringOffset + kPointerSize;
static const int kSize = kToNumberOffset + kPointerSize;
+ typedef FixedBodyDescriptor<kToStringOffset,
+ kToNumberOffset + kPointerSize,
+ kSize> BodyDescriptor;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Oddball);
};
@@ -4795,8 +4870,6 @@ class JSGlobalPropertyCell: public HeapObject {
// Casting.
static inline JSGlobalPropertyCell* cast(Object* obj);
- // Dispatched behavior.
- void JSGlobalPropertyCellIterateBody(ObjectVisitor* v);
#ifdef DEBUG
void JSGlobalPropertyCellVerify();
void JSGlobalPropertyCellPrint();
@@ -4806,6 +4879,10 @@ class JSGlobalPropertyCell: public HeapObject {
static const int kValueOffset = HeapObject::kHeaderSize;
static const int kSize = kValueOffset + kPointerSize;
+ typedef FixedBodyDescriptor<kValueOffset,
+ kValueOffset + kPointerSize,
+ kSize> BodyDescriptor;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell);
};
@@ -4826,6 +4903,10 @@ class Proxy: public HeapObject {
// Dispatched behavior.
inline void ProxyIterateBody(ObjectVisitor* v);
+
+ template<typename StaticVisitor>
+ inline void ProxyIterateBody();
+
#ifdef DEBUG
void ProxyPrint();
void ProxyVerify();
@@ -5343,6 +5424,15 @@ class ObjectVisitor BASE_EMBEDDED {
};
+class StructBodyDescriptor : public
+ FlexibleBodyDescriptor<HeapObject::kHeaderSize> {
+ public:
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return map->instance_size();
+ }
+};
+
+
// BooleanBit is a helper class for setting and getting a bit in an
// integer or Smi.
class BooleanBit : public AllStatic {
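The body-descriptor changes above replace the per-class *IterateBody methods
with BodyDescriptor typedefs, letting the new objects-visiting machinery
instantiate a static visitor per map instead of dispatching virtually. A
minimal sketch of the idea, assuming a RawField helper that returns the
address of a tagged slot (names are illustrative, not the exact V8
declarations):

  template<int start_offset, int end_offset, int size_in_bytes>
  class FixedBodyDescriptorSketch {
   public:
    template<typename StaticVisitor>
    static inline void IterateBody(HeapObject* obj) {
      // Visit every tagged slot in [start_offset, end_offset); the bounds
      // are compile-time constants, so the loop can be unrolled.
      StaticVisitor::VisitPointers(
          HeapObject::RawField(obj, start_offset),
          HeapObject::RawField(obj, end_offset));
    }
  };

  // ConsString then only needs to declare its layout:
  //   typedef FixedBodyDescriptorSketch<kFirstOffset,
  //                                     kSecondOffset + kPointerSize,
  //                                     kSize> BodyDescriptor;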
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index e935b7b4a..1df7c2145 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -3587,10 +3587,8 @@ ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter,
// { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
// We have already read the "get" or "set" keyword.
Token::Value next = Next();
- if (next == Token::IDENTIFIER ||
- next == Token::STRING ||
- next == Token::NUMBER ||
- Token::IsKeyword(next)) {
+ // TODO(820): Allow NUMBER and STRING as well (and handle array indices).
+ if (next == Token::IDENTIFIER || Token::IsKeyword(next)) {
Handle<String> name =
factory()->LookupSymbol(scanner_.literal_string(),
scanner_.literal_length());
@@ -3652,8 +3650,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
factory()->LookupSymbol(scanner_.literal_string(),
scanner_.literal_length());
uint32_t index;
- if (!string.is_null() &&
- string->AsArrayIndex(&index)) {
+ if (!string.is_null() && string->AsArrayIndex(&index)) {
key = NewNumberLiteral(index);
break;
}
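With the narrowed check, only identifier-like accessor names are accepted
until TODO(820) is addressed: { get foo() {} } and { get if() {} } still
parse, while { get 1() {} } and { get "x"() {} } now appear to fall through
to the error path. A hedged restatement of the predicate (illustrative only):

  static bool IsAcceptedAccessorName(Token::Value next) {
    return next == Token::IDENTIFIER || Token::IsKeyword(next);
  }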
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index d3a44982f..f7d8609b9 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -290,9 +290,10 @@ void OS::Abort() {
void OS::DebugBreak() {
// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
// which is the architecture of generated code).
-#if (defined(__arm__) || defined(__thumb__)) && \
- defined(CAN_USE_ARMV5_INSTRUCTIONS)
+#if (defined(__arm__) || defined(__thumb__))
+# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
asm("bkpt 0");
+# endif
#elif defined(__mips__)
asm("break");
#else
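Note the semantic shift here: with the nested #if, DebugBreak() compiles to
an empty body on ARM targets that lack ARMv5 instructions, instead of falling
into the generic #else branch (cut off in this excerpt), whose inline
assembly is written for other architectures.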
diff --git a/deps/v8/src/platform-nullos.cc b/deps/v8/src/platform-nullos.cc
index 656c317b6..b8392e886 100644
--- a/deps/v8/src/platform-nullos.cc
+++ b/deps/v8/src/platform-nullos.cc
@@ -100,6 +100,12 @@ double OS::DaylightSavingsOffset(double time) {
}
+int OS::GetLastError() {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+
// Returns the local time offset in milliseconds east of UTC without
// taking daylight savings time into account.
double OS::LocalTimeOffset() {
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index 89f4d983d..c50d396ad 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -108,6 +108,11 @@ double OS::DaylightSavingsOffset(double time) {
}
+int OS::GetLastError() {
+ return errno;
+}
+
+
// ----------------------------------------------------------------------------
// POSIX stdio support.
//
@@ -238,7 +243,7 @@ bool POSIXSocket::Bind(const int port) {
addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
addr.sin_port = htons(port);
int status = bind(socket_,
- reinterpret_cast<struct sockaddr *>(&addr),
+ BitCast<struct sockaddr *>(&addr),
sizeof(addr));
return status == 0;
}
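Together with the null-OS stub above and the Win32 version below, this gives
OS::GetLastError() a uniform meaning: the platform's last error code (errno
on POSIX, ::GetLastError() on Windows). A hedged usage sketch; the
surrounding calls are illustrative, not taken from this patch:

  Socket* socket = OS::CreateSocket();
  if (socket == NULL || !socket->Bind(5858)) {
    OS::PrintError("debugger socket setup failed, OS error %d\n",
                   OS::GetLastError());
  }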
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index af3e9b2f3..86314a805 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -651,6 +651,11 @@ double OS::DaylightSavingsOffset(double time) {
}
+int OS::GetLastError() {
+ return ::GetLastError();
+}
+
+
// ----------------------------------------------------------------------------
// Win32 console output.
//
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index 1091ba6fb..b75867cfc 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -165,6 +165,9 @@ class OS {
// Returns the daylight savings offset for the given time.
static double DaylightSavingsOffset(double time);
+ // Returns last OS error.
+ static int GetLastError();
+
static FILE* FOpen(const char* path, const char* mode);
// Log file open mode is platform-dependent due to line ends issues.
diff --git a/deps/v8/src/platform.h.orig b/deps/v8/src/platform.h.orig
new file mode 100644
index 000000000..7539fd2dd
--- /dev/null
+++ b/deps/v8/src/platform.h.orig
@@ -0,0 +1,580 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This module contains the platform-specific code. This makes the rest of the
+// code less dependent on operating system, compilers and runtime libraries.
+// This module specifically does not deal with differences between different
+// processor architectures.
+// The platform classes have the same definition for all platforms. The
+// implementation for a particular platform is put in platform_<os>.cc.
+// The build system then uses the implementation for the target platform.
+//
+// This design has been chosen because it is simple and fast. Alternatively,
+// the platform dependent classes could have been implemented using abstract
+// superclasses with virtual methods and having specializations for each
+// platform. This design was rejected because it was more complicated and
+// slower. It would require factory methods for selecting the right
+// implementation and the overhead of virtual methods for performance
+// sensitive operations like mutex locking/unlocking.
+
+#ifndef V8_PLATFORM_H_
+#define V8_PLATFORM_H_
+
+#define V8_INFINITY INFINITY
+
+// Windows specific stuff.
+#ifdef WIN32
+
+// Microsoft Visual C++ specific stuff.
+#ifdef _MSC_VER
+
+enum {
+ FP_NAN,
+ FP_INFINITE,
+ FP_ZERO,
+ FP_SUBNORMAL,
+ FP_NORMAL
+};
+
+#undef V8_INFINITY
+#define V8_INFINITY HUGE_VAL
+
+namespace v8 {
+namespace internal {
+int isfinite(double x);
+} }
+int isnan(double x);
+int isinf(double x);
+int isless(double x, double y);
+int isgreater(double x, double y);
+int fpclassify(double x);
+int signbit(double x);
+
+int strncasecmp(const char* s1, const char* s2, int n);
+
+#endif // _MSC_VER
+
+// Random is missing on both Visual Studio and MinGW.
+int random();
+
+#endif // WIN32
+
+
+#ifdef __sun
+# ifndef signbit
+int signbit(double x);
+# endif
+#endif
+
+
+// GCC specific stuff
+#ifdef __GNUC__
+
+// Needed for va_list on at least MinGW and Android.
+#include <stdarg.h>
+
+#define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
+
+// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
+// warning flag and certain versions of GCC due to a bug:
+// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
+// For now, we use the more involved template-based version from <limits>, but
+// only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x).
+// __GNUC_PREREQ is not defined in GCC for Mac OS X, so we define our own macro.
+#if __GNUC_VERSION__ >= 29600 && __GNUC_VERSION__ < 40100
+#include <limits>
+#undef V8_INFINITY
+#define V8_INFINITY std::numeric_limits<double>::infinity()
+#endif
+
+#endif // __GNUC__
+
+namespace v8 {
+namespace internal {
+
+// Use AtomicWord for a machine-sized pointer. It is assumed that
+// reads and writes of naturally aligned values of this type are atomic.
+typedef intptr_t AtomicWord;
+
+class Semaphore;
+
+double ceiling(double x);
+double modulo(double x, double y);
+
+// Forward declarations.
+class Socket;
+
+// ----------------------------------------------------------------------------
+// OS
+//
+// This class has static methods for the different platform specific
+// functions. Add methods here to cope with differences between the
+// supported platforms.
+
+class OS {
+ public:
+ // Initializes the platform OS support. Called once at VM startup.
+ static void Setup();
+
+  // Returns the accumulated user time for the thread. This routine
+  // can be used for profiling. The implementation should
+  // strive for high-precision timer resolution, preferably
+  // micro-second resolution.
+ static int GetUserTime(uint32_t* secs, uint32_t* usecs);
+
+ // Get a tick counter normalized to one tick per microsecond.
+ // Used for calculating time intervals.
+ static int64_t Ticks();
+
+ // Returns current time as the number of milliseconds since
+ // 00:00:00 UTC, January 1, 1970.
+ static double TimeCurrentMillis();
+
+ // Returns a string identifying the current time zone. The
+ // timestamp is used for determining if DST is in effect.
+ static const char* LocalTimezone(double time);
+
+ // Returns the local time offset in milliseconds east of UTC without
+ // taking daylight savings time into account.
+ static double LocalTimeOffset();
+
+ // Returns the daylight savings offset for the given time.
+ static double DaylightSavingsOffset(double time);
+
+ // Returns last OS error.
+ static int GetLastError();
+
+ static FILE* FOpen(const char* path, const char* mode);
+
+ // Log file open mode is platform-dependent due to line ends issues.
+ static const char* LogFileOpenMode;
+
+ // Print output to console. This is mostly used for debugging output.
+  // On platforms that have standard terminal output, the output
+ // should go to stdout.
+ static void Print(const char* format, ...);
+ static void VPrint(const char* format, va_list args);
+
+ // Print error output to console. This is mostly used for error message
+  // output. On platforms that have standard terminal output, the output
+ // should go to stderr.
+ static void PrintError(const char* format, ...);
+ static void VPrintError(const char* format, va_list args);
+
+  // Allocate/Free memory used by JS heap. Pages are readable/writable, but
+  // they are not guaranteed to be executable unless 'is_executable' is true.
+  // Returns the address of allocated memory, or NULL on failure.
+ static void* Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable);
+ static void Free(void* address, const size_t size);
+  // Get the alignment guaranteed by Allocate().
+ static size_t AllocateAlignment();
+
+#ifdef ENABLE_HEAP_PROTECTION
+ // Protect/unprotect a block of memory by marking it read-only/writable.
+ static void Protect(void* address, size_t size);
+ static void Unprotect(void* address, size_t size, bool is_executable);
+#endif
+
+ // Returns an indication of whether a pointer is in a space that
+ // has been allocated by Allocate(). This method may conservatively
+ // always return false, but giving more accurate information may
+ // improve the robustness of the stack dump code in the presence of
+ // heap corruption.
+ static bool IsOutsideAllocatedSpace(void* pointer);
+
+ // Sleep for a number of milliseconds.
+ static void Sleep(const int milliseconds);
+
+ // Abort the current process.
+ static void Abort();
+
+ // Debug break.
+ static void DebugBreak();
+
+ // Walk the stack.
+ static const int kStackWalkError = -1;
+ static const int kStackWalkMaxNameLen = 256;
+ static const int kStackWalkMaxTextLen = 256;
+ struct StackFrame {
+ void* address;
+ char text[kStackWalkMaxTextLen];
+ };
+
+ static int StackWalk(Vector<StackFrame> frames);
+
+ // Factory method for creating platform dependent Mutex.
+ // Please use delete to reclaim the storage for the returned Mutex.
+ static Mutex* CreateMutex();
+
+ // Factory method for creating platform dependent Semaphore.
+ // Please use delete to reclaim the storage for the returned Semaphore.
+ static Semaphore* CreateSemaphore(int count);
+
+ // Factory method for creating platform dependent Socket.
+ // Please use delete to reclaim the storage for the returned Socket.
+ static Socket* CreateSocket();
+
+ class MemoryMappedFile {
+ public:
+ static MemoryMappedFile* create(const char* name, int size, void* initial);
+ virtual ~MemoryMappedFile() { }
+ virtual void* memory() = 0;
+ };
+
+ // Safe formatting print. Ensures that str is always null-terminated.
+ // Returns the number of chars written, or -1 if output was truncated.
+ static int SNPrintF(Vector<char> str, const char* format, ...);
+ static int VSNPrintF(Vector<char> str,
+ const char* format,
+ va_list args);
+
+ static char* StrChr(char* str, int c);
+ static void StrNCpy(Vector<char> dest, const char* src, size_t n);
+
+ // Support for profiler. Can do nothing, in which case ticks
+  // occurring in shared libraries will not be properly accounted
+ // for.
+ static void LogSharedLibraryAddresses();
+
+ // The return value indicates the CPU features we are sure of because of the
+ // OS. For example MacOSX doesn't run on any x86 CPUs that don't have SSE2
+ // instructions.
+  // This is a little messy because the interpretation is subject to the
+  // combination of the CPU and the OS. The bits in the answer correspond to
+  // the bit positions indicated by the members of the CpuFeature enum from
+  // globals.h.
+ static uint64_t CpuFeaturesImpliedByPlatform();
+
+ // Returns the double constant NAN
+ static double nan_value();
+
+ // Support runtime detection of VFP3 on ARM CPUs.
+ static bool ArmCpuHasFeature(CpuFeature feature);
+
+ // Returns the activation frame alignment constraint or zero if
+ // the platform doesn't care. Guaranteed to be a power of two.
+ static int ActivationFrameAlignment();
+
+ static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
+
+ private:
+ static const int msPerSecond = 1000;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
+};
+
+
+class VirtualMemory {
+ public:
+ // Reserves virtual memory with size.
+ explicit VirtualMemory(size_t size);
+ ~VirtualMemory();
+
+ // Returns whether the memory has been reserved.
+ bool IsReserved();
+
+ // Returns the start address of the reserved memory.
+ void* address() {
+ ASSERT(IsReserved());
+ return address_;
+  }
+
+ // Returns the size of the reserved memory.
+ size_t size() { return size_; }
+
+ // Commits real memory. Returns whether the operation succeeded.
+ bool Commit(void* address, size_t size, bool is_executable);
+
+ // Uncommit real memory. Returns whether the operation succeeded.
+ bool Uncommit(void* address, size_t size);
+
+ private:
+ void* address_; // Start address of the virtual memory.
+ size_t size_; // Size of the virtual memory.
+};
+
+
+// ----------------------------------------------------------------------------
+// ThreadHandle
+//
+// A ThreadHandle represents a thread identifier. The ThreadHandle does not
+// own the underlying OS handle. Thread handles can be used for referring to
+// threads and testing equality.
+
+class ThreadHandle {
+ public:
+ enum Kind { SELF, INVALID };
+ explicit ThreadHandle(Kind kind);
+
+ // Destructor.
+ ~ThreadHandle();
+
+ // Test for thread running.
+ bool IsSelf() const;
+
+ // Test for valid thread handle.
+ bool IsValid() const;
+
+ // Get platform-specific data.
+ class PlatformData;
+ PlatformData* thread_handle_data() { return data_; }
+
+ // Initialize the handle to kind
+ void Initialize(Kind kind);
+
+ private:
+ PlatformData* data_; // Captures platform dependent data.
+};
+
+
+// ----------------------------------------------------------------------------
+// Thread
+//
+// Thread objects are used for creating and running threads. When the start()
+// method is called the new thread starts running the run() method in the new
+// thread. The Thread object should not be deallocated before the thread has
+// terminated.
+
+class Thread: public ThreadHandle {
+ public:
+ // Opaque data type for thread-local storage keys.
+ enum LocalStorageKey {};
+
+ // Create new thread.
+ Thread();
+ virtual ~Thread();
+
+ // Start new thread by calling the Run() method in the new thread.
+ void Start();
+
+ // Wait until thread terminates.
+ void Join();
+
+ // Abstract method for run handler.
+ virtual void Run() = 0;
+
+ // Thread-local storage.
+ static LocalStorageKey CreateThreadLocalKey();
+ static void DeleteThreadLocalKey(LocalStorageKey key);
+ static void* GetThreadLocal(LocalStorageKey key);
+ static int GetThreadLocalInt(LocalStorageKey key) {
+ return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
+ }
+ static void SetThreadLocal(LocalStorageKey key, void* value);
+ static void SetThreadLocalInt(LocalStorageKey key, int value) {
+ SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
+ }
+ static bool HasThreadLocal(LocalStorageKey key) {
+ return GetThreadLocal(key) != NULL;
+ }
+
+ // A hint to the scheduler to let another thread run.
+ static void YieldCPU();
+
+ private:
+ class PlatformData;
+ PlatformData* data_;
+ DISALLOW_COPY_AND_ASSIGN(Thread);
+};
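A minimal usage sketch for the TLS helpers above (the key and functions are
illustrative):

  static Thread::LocalStorageKey thread_id_key =
      Thread::CreateThreadLocalKey();

  void SetCurrentThreadId(int id) {
    // Stored as a void* internally via SetThreadLocal().
    Thread::SetThreadLocalInt(thread_id_key, id);
  }

  int CurrentThreadId() {
    // Yields 0 when nothing was stored, since GetThreadLocal returns NULL.
    return Thread::GetThreadLocalInt(thread_id_key);
  }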
+
+
+// ----------------------------------------------------------------------------
+// Mutex
+//
+// Mutexes are used for serializing access to non-reentrant sections of code.
+// The implementations of mutex should allow for nested/recursive locking.
+
+class Mutex {
+ public:
+ virtual ~Mutex() {}
+
+  // Locks the given mutex. If the mutex is currently unlocked, it becomes
+  // locked and owned by the calling thread, and the call returns immediately.
+  // If the mutex is already locked by another thread, suspends the calling
+  // thread until the mutex is unlocked.
+ virtual int Lock() = 0;
+
+ // Unlocks the given mutex. The mutex is assumed to be locked and owned by
+ // the calling thread on entrance.
+ virtual int Unlock() = 0;
+};
+
+
+// ----------------------------------------------------------------------------
+// ScopedLock
+//
+// Stack-allocated ScopedLocks provide block-scoped locking and unlocking
+// of a mutex.
+class ScopedLock {
+ public:
+ explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
+ mutex_->Lock();
+ }
+ ~ScopedLock() {
+ mutex_->Unlock();
+ }
+
+ private:
+ Mutex* mutex_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedLock);
+};
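ScopedLock provides RAII locking over a factory-created Mutex. A short
sketch (the counter and its mutex are illustrative):

  static Mutex* counter_mutex = OS::CreateMutex();
  static int shared_counter = 0;

  void IncrementSharedCounter() {
    ScopedLock lock(counter_mutex);  // Lock() here...
    ++shared_counter;
  }                                  // ...Unlock() on every exit path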
+
+
+// ----------------------------------------------------------------------------
+// Semaphore
+//
+// A semaphore object is a synchronization object that maintains a count. The
+// count is decremented each time a thread completes a wait for the semaphore
+// object and incremented each time a thread signals the semaphore. When the
+// count reaches zero, threads waiting for the semaphore block until the
+// count becomes non-zero.
+
+class Semaphore {
+ public:
+ virtual ~Semaphore() {}
+
+ // Suspends the calling thread until the semaphore counter is non zero
+ // and then decrements the semaphore counter.
+ virtual void Wait() = 0;
+
+ // Suspends the calling thread until the counter is non zero or the timeout
+  // time has passed. If the timeout expires, the return value is false and the
+ // counter is unchanged. Otherwise the semaphore counter is decremented and
+ // true is returned. The timeout value is specified in microseconds.
+ virtual bool Wait(int timeout) = 0;
+
+ // Increments the semaphore counter.
+ virtual void Signal() = 0;
+};
+
+
+// ----------------------------------------------------------------------------
+// Socket
+//
+
+class Socket {
+ public:
+ virtual ~Socket() {}
+
+ // Server initialization.
+ virtual bool Bind(const int port) = 0;
+ virtual bool Listen(int backlog) const = 0;
+ virtual Socket* Accept() const = 0;
+
+ // Client initialization.
+ virtual bool Connect(const char* host, const char* port) = 0;
+
+ // Shutdown socket for both read and write. This causes blocking Send and
+ // Receive calls to exit. After Shutdown the Socket object cannot be used for
+ // any communication.
+ virtual bool Shutdown() = 0;
+
+  // Data Transmission
+ virtual int Send(const char* data, int len) const = 0;
+ virtual int Receive(char* data, int len) const = 0;
+
+ // Set the value of the SO_REUSEADDR socket option.
+ virtual bool SetReuseAddress(bool reuse_address) = 0;
+
+ virtual bool IsValid() const = 0;
+
+ static bool Setup();
+ static int LastError();
+ static uint16_t HToN(uint16_t value);
+ static uint16_t NToH(uint16_t value);
+ static uint32_t HToN(uint32_t value);
+ static uint32_t NToH(uint32_t value);
+};
+
+
+// ----------------------------------------------------------------------------
+// Sampler
+//
+// A sampler periodically samples the state of the VM and optionally
+// (if used for profiling) the program counter and stack pointer for
+// the thread that created it.
+
+// TickSample captures the information collected for each sample.
+class TickSample {
+ public:
+ TickSample()
+ : state(OTHER),
+ pc(NULL),
+ sp(NULL),
+ fp(NULL),
+ function(NULL),
+ frames_count(0) {}
+ StateTag state; // The state of the VM.
+ Address pc; // Instruction pointer.
+ Address sp; // Stack pointer.
+ Address fp; // Frame pointer.
+ Address function; // The last called JS function.
+ static const int kMaxFramesCount = 64;
+ Address stack[kMaxFramesCount]; // Call stack.
+ int frames_count; // Number of captured frames.
+};
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+class Sampler {
+ public:
+ // Initialize sampler.
+ explicit Sampler(int interval, bool profiling);
+ virtual ~Sampler();
+
+ // Performs stack sampling.
+ virtual void SampleStack(TickSample* sample) = 0;
+
+ // This method is called for each sampling period with the current
+ // program counter.
+ virtual void Tick(TickSample* sample) = 0;
+
+ // Start and stop sampler.
+ void Start();
+ void Stop();
+
+ // Is the sampler used for profiling.
+ inline bool IsProfiling() { return profiling_; }
+
+ // Whether the sampler is running (that is, consumes resources).
+ inline bool IsActive() { return active_; }
+
+ class PlatformData;
+
+ private:
+ const int interval_;
+ const bool profiling_;
+ bool active_;
+ PlatformData* data_; // Platform specific data.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
+};
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_H_
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index ea9bc9876..0c50581ab 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -97,13 +97,6 @@ void CodeMap::DeleteCode(Address addr) {
}
-bool CpuProfilesCollection::is_last_profile() {
- // Called from VM thread, and only it can mutate the list,
- // so no locking is needed here.
- return current_profiles_.length() == 1;
-}
-
-
const char* CpuProfilesCollection::GetFunctionName(String* name) {
return GetFunctionName(GetName(name));
}
@@ -130,17 +123,6 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
}
}
-
-template<class Visitor>
-void HeapEntriesMap::Apply(Visitor* visitor) {
- for (HashMap::Entry* p = entries_.Start();
- p != NULL;
- p = entries_.Next(p)) {
- if (!IsAlias(p->value))
- visitor->Apply(reinterpret_cast<HeapEntry*>(p->value));
- }
-}
-
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index 7054b1259..cd46badf0 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -542,13 +542,6 @@ CpuProfile* CpuProfilesCollection::StopProfiling(int security_token_id,
}
-CpuProfile* CpuProfilesCollection::StopProfiling(int security_token_id,
- String* title,
- double actual_sampling_rate) {
- return StopProfiling(security_token_id, GetName(title), actual_sampling_rate);
-}
-
-
CpuProfile* CpuProfilesCollection::GetProfile(int security_token_id,
unsigned uid) {
HashMap::Entry* entry = profiles_uids_.Lookup(reinterpret_cast<void*>(uid),
@@ -574,6 +567,15 @@ CpuProfile* CpuProfilesCollection::GetProfile(int security_token_id,
}
+bool CpuProfilesCollection::IsLastProfile(const char* title) {
+ // Called from VM thread, and only it can mutate the list,
+ // so no locking is needed here.
+ if (current_profiles_.length() != 1) return false;
+ return StrLength(title) == 0
+ || strcmp(current_profiles_[0]->title(), title) == 0;
+}
+
+
int CpuProfilesCollection::TokenToIndex(int security_token_id) {
ASSERT(TokenEnumerator::kNoSecurityToken == -1);
return security_token_id + 1; // kNoSecurityToken -> 0, 0 -> 1, ...
@@ -798,83 +800,102 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
}
-HeapGraphEdge::HeapGraphEdge(Type type,
- const char* name,
- HeapEntry* from,
- HeapEntry* to)
- : type_(type), name_(name), from_(from), to_(to) {
- ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY || type_ == INTERNAL);
-}
-
-
-HeapGraphEdge::HeapGraphEdge(int index,
- HeapEntry* from,
- HeapEntry* to)
- : type_(ELEMENT), index_(index), from_(from), to_(to) {
-}
-
-
-static void DeleteHeapGraphEdge(HeapGraphEdge** edge_ptr) {
- delete *edge_ptr;
+void HeapGraphEdge::Init(
+ int child_index, Type type, const char* name, HeapEntry* to) {
+ ASSERT(type == kContextVariable || type == kProperty || type == kInternal);
+ child_index_ = child_index;
+ type_ = type;
+ name_ = name;
+ to_ = to;
}
-static void DeleteHeapGraphPath(HeapGraphPath** path_ptr) {
- delete *path_ptr;
+void HeapGraphEdge::Init(int child_index, int index, HeapEntry* to) {
+ child_index_ = child_index;
+ type_ = kElement;
+ index_ = index;
+ to_ = to;
}
-HeapEntry::~HeapEntry() {
- children_.Iterate(DeleteHeapGraphEdge);
- retaining_paths_.Iterate(DeleteHeapGraphPath);
+HeapEntry* HeapGraphEdge::From() {
+ return reinterpret_cast<HeapEntry*>(this - child_index_) - 1;
}
-void HeapEntry::AddEdge(HeapGraphEdge* edge) {
- children_.Add(edge);
- edge->to()->retainers_.Add(edge);
+void HeapEntry::Init(HeapSnapshot* snapshot,
+ int children_count,
+ int retainers_count) {
+ Init(snapshot, kInternal, "", 0, 0, children_count, retainers_count);
}
-void HeapEntry::SetClosureReference(const char* name, HeapEntry* entry) {
- AddEdge(
- new HeapGraphEdge(HeapGraphEdge::CONTEXT_VARIABLE, name, this, entry));
+void HeapEntry::Init(HeapSnapshot* snapshot,
+ Type type,
+ const char* name,
+ uint64_t id,
+ int self_size,
+ int children_count,
+ int retainers_count) {
+ snapshot_ = snapshot;
+ type_ = type;
+ painted_ = kUnpainted;
+ calculated_data_index_ = kNoCalculatedData;
+ name_ = name;
+ id_ = id;
+ self_size_ = self_size;
+ children_count_ = children_count;
+ retainers_count_ = retainers_count;
}
-void HeapEntry::SetElementReference(int index, HeapEntry* entry) {
- AddEdge(new HeapGraphEdge(index, this, entry));
+void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
+ int child_index,
+ const char* name,
+ HeapEntry* entry,
+ int retainer_index) {
+ children_arr()[child_index].Init(child_index, type, name, entry);
+ entry->retainers_arr()[retainer_index] = children_arr() + child_index;
}
-void HeapEntry::SetInternalReference(const char* name, HeapEntry* entry) {
- AddEdge(new HeapGraphEdge(HeapGraphEdge::INTERNAL, name, this, entry));
+void HeapEntry::SetElementReference(
+ int child_index, int index, HeapEntry* entry, int retainer_index) {
+ children_arr()[child_index].Init(child_index, index, entry);
+ entry->retainers_arr()[retainer_index] = children_arr() + child_index;
}
-void HeapEntry::SetPropertyReference(const char* name, HeapEntry* entry) {
- AddEdge(new HeapGraphEdge(HeapGraphEdge::PROPERTY, name, this, entry));
+void HeapEntry::SetUnidirElementReference(
+ int child_index, int index, HeapEntry* entry) {
+ children_arr()[child_index].Init(child_index, index, entry);
}
-void HeapEntry::SetAutoIndexReference(HeapEntry* entry) {
- SetElementReference(next_auto_index_++, entry);
-}
-
-
-void HeapEntry::SetUnidirAutoIndexReference(HeapEntry* entry) {
- children_.Add(new HeapGraphEdge(next_auto_index_++, this, entry));
+int HeapEntry::ReachableSize() {
+ if (calculated_data_index_ == kNoCalculatedData) {
+ calculated_data_index_ = snapshot_->AddCalculatedData();
+ }
+ return snapshot_->GetCalculatedData(
+ calculated_data_index_).ReachableSize(this);
}
-int HeapEntry::TotalSize() {
- return total_size_ != kUnknownSize ? total_size_ : CalculateTotalSize();
+int HeapEntry::RetainedSize() {
+ if (calculated_data_index_ == kNoCalculatedData) {
+ calculated_data_index_ = snapshot_->AddCalculatedData();
+ }
+ return snapshot_->GetCalculatedData(
+ calculated_data_index_).RetainedSize(this);
}
-int HeapEntry::NonSharedTotalSize() {
- return non_shared_total_size_ != kUnknownSize ?
- non_shared_total_size_ : CalculateNonSharedTotalSize();
+List<HeapGraphPath*>* HeapEntry::GetRetainingPaths() {
+ if (calculated_data_index_ == kNoCalculatedData) {
+ calculated_data_index_ = snapshot_->AddCalculatedData();
+ }
+ return snapshot_->GetCalculatedData(
+ calculated_data_index_).GetRetainingPaths(this);
}
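The Init/From pair works because a snapshot now stores each entry and its
child edges contiguously, so an edge recovers its owner by arithmetic rather
than a stored back pointer. A hedged picture of the layout (packing assumed
from EntriesSize(), not quoted from the header):

  // raw_entries_ buffer, one variable-sized record per entry:
  //
  //   [HeapEntry][child edge 0]...[child edge n-1][retainer HeapEntry*...]
  //
  // For the edge at children_arr()[i], child_index_ == i, so
  //   this - child_index_                    -> first edge of the array
  //   reinterpret_cast<HeapEntry*>(...) - 1  -> the owning HeapEntry
  // which is exactly what HeapGraphEdge::From() computes.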
@@ -882,16 +903,16 @@ template<class Visitor>
void HeapEntry::ApplyAndPaintAllReachable(Visitor* visitor) {
List<HeapEntry*> list(10);
list.Add(this);
- this->PaintReachable();
+ this->paint_reachable();
visitor->Apply(this);
while (!list.is_empty()) {
HeapEntry* entry = list.RemoveLast();
- const int children_count = entry->children_.length();
- for (int i = 0; i < children_count; ++i) {
- HeapEntry* child = entry->children_[i]->to();
+ Vector<HeapGraphEdge> children = entry->children();
+ for (int i = 0; i < children.length(); ++i) {
+ HeapEntry* child = children[i].to();
if (!child->painted_reachable()) {
list.Add(child);
- child->PaintReachable();
+ child->paint_reachable();
visitor->Apply(child);
}
}
@@ -910,78 +931,158 @@ void HeapEntry::PaintAllReachable() {
}
-class TotalSizeCalculator {
- public:
- TotalSizeCalculator()
- : total_size_(0) {
+void HeapEntry::Print(int max_depth, int indent) {
+ OS::Print("%6d %6d %6d [%ld] ",
+ self_size(), ReachableSize(), RetainedSize(), id_);
+ if (type() != kString) {
+ OS::Print("%s %.40s\n", TypeAsString(), name_);
+ } else {
+ OS::Print("\"");
+ const char* c = name_;
+ while (*c && (c - name_) <= 40) {
+ if (*c != '\n')
+ OS::Print("%c", *c);
+ else
+ OS::Print("\\n");
+ ++c;
+ }
+ OS::Print("\"\n");
}
+ if (--max_depth == 0) return;
+ Vector<HeapGraphEdge> ch = children();
+ for (int i = 0; i < ch.length(); ++i) {
+ HeapGraphEdge& edge = ch[i];
+ switch (edge.type()) {
+ case HeapGraphEdge::kContextVariable:
+ OS::Print(" %*c #%s: ", indent, ' ', edge.name());
+ break;
+ case HeapGraphEdge::kElement:
+ OS::Print(" %*c %d: ", indent, ' ', edge.index());
+ break;
+ case HeapGraphEdge::kInternal:
+ OS::Print(" %*c $%s: ", indent, ' ', edge.name());
+ break;
+ case HeapGraphEdge::kProperty:
+ OS::Print(" %*c %s: ", indent, ' ', edge.name());
+ break;
+ default:
+ OS::Print("!!! unknown edge type: %d ", edge.type());
+ }
+ edge.to()->Print(max_depth, indent + 2);
+ }
+}
- int total_size() const { return total_size_; }
- void Apply(HeapEntry* entry) {
- total_size_ += entry->self_size();
+const char* HeapEntry::TypeAsString() {
+ switch (type()) {
+ case kInternal: return "/internal/";
+ case kObject: return "/object/";
+ case kClosure: return "/closure/";
+ case kString: return "/string/";
+ case kCode: return "/code/";
+ case kArray: return "/array/";
+ default: return "???";
}
+}
- private:
- int total_size_;
-};
-int HeapEntry::CalculateTotalSize() {
- snapshot_->ClearPaint();
- TotalSizeCalculator calc;
- ApplyAndPaintAllReachable(&calc);
- total_size_ = calc.total_size();
- return total_size_;
+int HeapEntry::EntriesSize(int entries_count,
+ int children_count,
+ int retainers_count) {
+ return sizeof(HeapEntry) * entries_count // NOLINT
+ + sizeof(HeapGraphEdge) * children_count // NOLINT
+ + sizeof(HeapGraphEdge*) * retainers_count; // NOLINT
}
-class NonSharedSizeCalculator {
+static void DeleteHeapGraphPath(HeapGraphPath** path_ptr) {
+ delete *path_ptr;
+}
+
+void HeapEntryCalculatedData::Dispose() {
+ if (retaining_paths_ != NULL) retaining_paths_->Iterate(DeleteHeapGraphPath);
+ delete retaining_paths_;
+}
+
+
+int HeapEntryCalculatedData::ReachableSize(HeapEntry* entry) {
+ if (reachable_size_ == kUnknownSize) CalculateSizes(entry);
+ return reachable_size_;
+}
+
+
+int HeapEntryCalculatedData::RetainedSize(HeapEntry* entry) {
+ if (retained_size_ == kUnknownSize) CalculateSizes(entry);
+ return retained_size_;
+}
+
+
+class ReachableSizeCalculator {
public:
- NonSharedSizeCalculator()
- : non_shared_total_size_(0) {
+ ReachableSizeCalculator()
+ : reachable_size_(0) {
}
- int non_shared_total_size() const { return non_shared_total_size_; }
+ int reachable_size() const { return reachable_size_; }
void Apply(HeapEntry* entry) {
- if (entry->painted_reachable()) {
- non_shared_total_size_ += entry->self_size();
+ reachable_size_ += entry->self_size();
+ }
+
+ private:
+ int reachable_size_;
+};
+
+class RetainedSizeCalculator {
+ public:
+ RetainedSizeCalculator()
+ : retained_size_(0) {
+ }
+
+  int retained_size() const { return retained_size_; }
+
+ void Apply(HeapEntry** entry_ptr) {
+ if ((*entry_ptr)->painted_reachable()) {
+ retained_size_ += (*entry_ptr)->self_size();
}
}
private:
- int non_shared_total_size_;
+ int retained_size_;
};
-int HeapEntry::CalculateNonSharedTotalSize() {
- // To calculate non-shared total size, first we paint all reachable
- // nodes in one color, then we paint all nodes reachable from other
- // nodes with a different color. Then we consider only nodes painted
- // with the first color for calculating the total size.
- snapshot_->ClearPaint();
- PaintAllReachable();
+void HeapEntryCalculatedData::CalculateSizes(HeapEntry* entry) {
+ // To calculate retained size, first we paint all reachable nodes in
+ // one color (and calculate reachable size as a byproduct), then we
+ // paint (or re-paint) all nodes reachable from other nodes with a
+ // different color. Then we consider only nodes painted with the
+ // first color for calculating the retained size.
+ entry->snapshot()->ClearPaint();
+ ReachableSizeCalculator rch_size_calc;
+ entry->ApplyAndPaintAllReachable(&rch_size_calc);
+ reachable_size_ = rch_size_calc.reachable_size();
List<HeapEntry*> list(10);
- if (this != snapshot_->root()) {
- list.Add(snapshot_->root());
- snapshot_->root()->PaintReachableFromOthers();
+ HeapEntry* root = entry->snapshot()->root();
+ if (entry != root) {
+ list.Add(root);
+ root->paint_reachable_from_others();
}
while (!list.is_empty()) {
- HeapEntry* entry = list.RemoveLast();
- const int children_count = entry->children_.length();
- for (int i = 0; i < children_count; ++i) {
- HeapEntry* child = entry->children_[i]->to();
- if (child != this && child->not_painted_reachable_from_others()) {
+ HeapEntry* curr = list.RemoveLast();
+ Vector<HeapGraphEdge> children = curr->children();
+ for (int i = 0; i < children.length(); ++i) {
+ HeapEntry* child = children[i].to();
+ if (child != entry && child->not_painted_reachable_from_others()) {
list.Add(child);
- child->PaintReachableFromOthers();
+ child->paint_reachable_from_others();
}
}
}
- NonSharedSizeCalculator calculator;
- snapshot_->IterateEntries(&calculator);
- non_shared_total_size_ = calculator.non_shared_total_size();
- return non_shared_total_size_;
+ RetainedSizeCalculator ret_size_calc;
+ entry->snapshot()->IterateEntries(&ret_size_calc);
+  retained_size_ = ret_size_calc.retained_size();
}
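A small worked example of the two quantities computed above, assuming a
graph root -> A -> C with a second path root -> B -> C and a self size of 8
for every node:

  // ReachableSize(A) = self(A) + self(C) = 16.
  // RetainedSize(A)  = self(A)           = 8: the second traversal paints C
  //   "reachable from others" via B, so C is excluded; it would survive A's
  //   removal and therefore is not retained by A.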
@@ -1019,125 +1120,34 @@ class CachedHeapGraphPath {
};
-const List<HeapGraphPath*>* HeapEntry::GetRetainingPaths() {
- if (retaining_paths_.length() == 0 && retainers_.length() != 0) {
+List<HeapGraphPath*>* HeapEntryCalculatedData::GetRetainingPaths(
+ HeapEntry* entry) {
+ if (retaining_paths_ == NULL) retaining_paths_ = new List<HeapGraphPath*>(4);
+ if (retaining_paths_->length() == 0 && entry->retainers().length() != 0) {
CachedHeapGraphPath path;
- FindRetainingPaths(this, &path);
+ FindRetainingPaths(entry, &path);
}
- return &retaining_paths_;
+ return retaining_paths_;
}
-void HeapEntry::FindRetainingPaths(HeapEntry* node,
- CachedHeapGraphPath* prev_path) {
- for (int i = 0; i < node->retainers_.length(); ++i) {
- HeapGraphEdge* ret_edge = node->retainers_[i];
- if (prev_path->ContainsNode(ret_edge->from())) continue;
- if (ret_edge->from() != snapshot_->root()) {
+void HeapEntryCalculatedData::FindRetainingPaths(
+ HeapEntry* entry,
+ CachedHeapGraphPath* prev_path) {
+ Vector<HeapGraphEdge*> retainers = entry->retainers();
+ for (int i = 0; i < retainers.length(); ++i) {
+ HeapGraphEdge* ret_edge = retainers[i];
+ if (prev_path->ContainsNode(ret_edge->From())) continue;
+ if (ret_edge->From() != entry->snapshot()->root()) {
CachedHeapGraphPath path(*prev_path);
path.Add(ret_edge);
- FindRetainingPaths(ret_edge->from(), &path);
+ FindRetainingPaths(ret_edge->From(), &path);
} else {
HeapGraphPath* ret_path = new HeapGraphPath(*prev_path->path());
ret_path->Set(0, ret_edge);
- retaining_paths_.Add(ret_path);
- }
- }
-}
-
-
-static void RemoveEdge(List<HeapGraphEdge*>* list, HeapGraphEdge* edge) {
- for (int i = 0; i < list->length(); ) {
- if (list->at(i) == edge) {
- list->Remove(i);
- return;
- } else {
- ++i;
+ retaining_paths_->Add(ret_path);
}
}
- UNREACHABLE();
-}
-
-
-void HeapEntry::RemoveChild(HeapGraphEdge* edge) {
- RemoveEdge(&children_, edge);
- delete edge;
-}
-
-
-void HeapEntry::RemoveRetainer(HeapGraphEdge* edge) {
- RemoveEdge(&retainers_, edge);
-}
-
-
-void HeapEntry::CutEdges() {
- for (int i = 0; i < children_.length(); ++i) {
- HeapGraphEdge* edge = children_[i];
- edge->to()->RemoveRetainer(edge);
- }
- children_.Iterate(DeleteHeapGraphEdge);
- children_.Clear();
-
- for (int i = 0; i < retainers_.length(); ++i) {
- HeapGraphEdge* edge = retainers_[i];
- edge->from()->RemoveChild(edge);
- }
- retainers_.Clear();
-}
-
-
-void HeapEntry::Print(int max_depth, int indent) {
- OS::Print("%6d %6d %6d [%ld] ",
- self_size_, TotalSize(), NonSharedTotalSize(), id_);
- if (type_ != STRING) {
- OS::Print("%s %.40s\n", TypeAsString(), name_);
- } else {
- OS::Print("\"");
- const char* c = name_;
- while (*c && (c - name_) <= 40) {
- if (*c != '\n')
- OS::Print("%c", *c);
- else
- OS::Print("\\n");
- ++c;
- }
- OS::Print("\"\n");
- }
- if (--max_depth == 0) return;
- const int children_count = children_.length();
- for (int i = 0; i < children_count; ++i) {
- HeapGraphEdge* edge = children_[i];
- switch (edge->type()) {
- case HeapGraphEdge::CONTEXT_VARIABLE:
- OS::Print(" %*c #%s: ", indent, ' ', edge->name());
- break;
- case HeapGraphEdge::ELEMENT:
- OS::Print(" %*c %d: ", indent, ' ', edge->index());
- break;
- case HeapGraphEdge::INTERNAL:
- OS::Print(" %*c $%s: ", indent, ' ', edge->name());
- break;
- case HeapGraphEdge::PROPERTY:
- OS::Print(" %*c %s: ", indent, ' ', edge->name());
- break;
- default:
- OS::Print("!!! unknown edge type: %d ", edge->type());
- }
- edge->to()->Print(max_depth, indent + 2);
- }
-}
-
-
-const char* HeapEntry::TypeAsString() {
- switch (type_) {
- case INTERNAL: return "/internal/";
- case OBJECT: return "/object/";
- case CLOSURE: return "/closure/";
- case STRING: return "/string/";
- case CODE: return "/code/";
- case ARRAY: return "/array/";
- default: return "???";
- }
}
@@ -1151,21 +1161,21 @@ HeapGraphPath::HeapGraphPath(const List<HeapGraphEdge*>& path)
void HeapGraphPath::Print() {
- path_[0]->from()->Print(1, 0);
+ path_[0]->From()->Print(1, 0);
for (int i = 0; i < path_.length(); ++i) {
OS::Print(" -> ");
HeapGraphEdge* edge = path_[i];
switch (edge->type()) {
- case HeapGraphEdge::CONTEXT_VARIABLE:
+ case HeapGraphEdge::kContextVariable:
OS::Print("[#%s] ", edge->name());
break;
- case HeapGraphEdge::ELEMENT:
+ case HeapGraphEdge::kElement:
OS::Print("[%d] ", edge->index());
break;
- case HeapGraphEdge::INTERNAL:
+ case HeapGraphEdge::kInternal:
OS::Print("[$%s] ", edge->name());
break;
- case HeapGraphEdge::PROPERTY:
+ case HeapGraphEdge::kProperty:
OS::Print("[%s] ", edge->name());
break;
default:
@@ -1177,76 +1187,27 @@ void HeapGraphPath::Print() {
}
-class IndexedReferencesExtractor : public ObjectVisitor {
- public:
- IndexedReferencesExtractor(HeapSnapshot* snapshot, HeapEntry* parent)
- : snapshot_(snapshot),
- parent_(parent) {
- }
-
- void VisitPointer(Object** o) {
- if (!(*o)->IsHeapObject()) return;
- HeapEntry* entry = snapshot_->GetEntry(HeapObject::cast(*o));
- if (entry != NULL) {
- parent_->SetAutoIndexReference(entry);
- }
- }
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) VisitPointer(p);
- }
-
- private:
- HeapSnapshot* snapshot_;
- HeapEntry* parent_;
-};
-
-
-HeapEntriesMap::HeapEntriesMap()
- : entries_(HeapObjectsMatch) {
-}
-
-
-HeapEntriesMap::~HeapEntriesMap() {
- for (HashMap::Entry* p = entries_.Start();
- p != NULL;
- p = entries_.Next(p)) {
- if (!IsAlias(p->value)) delete reinterpret_cast<HeapEntry*>(p->value);
- }
-}
+HeapObject *const HeapSnapshot::kInternalRootObject =
+ reinterpret_cast<HeapObject*>(1);
-void HeapEntriesMap::Alias(HeapObject* object, HeapEntry* entry) {
- HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), true);
- if (cache_entry->value == NULL)
- cache_entry->value = reinterpret_cast<void*>(
- reinterpret_cast<intptr_t>(entry) | kAliasTag);
-}
+// It is very important to keep objects that form a heap snapshot
+// as small as possible.
+namespace { // Avoid littering the global namespace.
+template <size_t ptr_size> struct SnapshotSizeConstants;
-void HeapEntriesMap::Apply(void (HeapEntry::*Func)(void)) {
- for (HashMap::Entry* p = entries_.Start();
- p != NULL;
- p = entries_.Next(p)) {
- if (!IsAlias(p->value)) (reinterpret_cast<HeapEntry*>(p->value)->*Func)();
- }
-}
-
-
-HeapEntry* HeapEntriesMap::Map(HeapObject* object) {
- HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), false);
- return cache_entry != NULL ?
- reinterpret_cast<HeapEntry*>(
- reinterpret_cast<intptr_t>(cache_entry->value) & (~kAliasTag)) : NULL;
-}
-
+template <> struct SnapshotSizeConstants<4> {
+ static const int kExpectedHeapGraphEdgeSize = 12;
+ static const int kExpectedHeapEntrySize = 32;
+};
-void HeapEntriesMap::Pair(HeapObject* object, HeapEntry* entry) {
- HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), true);
- ASSERT(cache_entry->value == NULL);
- cache_entry->value = entry;
-}
+template <> struct SnapshotSizeConstants<8> {
+ static const int kExpectedHeapGraphEdgeSize = 24;
+ static const int kExpectedHeapEntrySize = 40;
+};
+} // namespace
HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
const char* title,
@@ -1254,176 +1215,157 @@ HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
: collection_(collection),
title_(title),
uid_(uid),
- root_(this),
- sorted_entries_(NULL) {
+ root_entry_index_(-1),
+ raw_entries_(NULL),
+ entries_sorted_(false) {
+ STATIC_ASSERT(
+ sizeof(HeapGraphEdge) ==
+ SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapGraphEdgeSize); // NOLINT
+ STATIC_ASSERT(
+ sizeof(HeapEntry) ==
+ SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapEntrySize); // NOLINT
}
+static void DisposeCalculatedData(HeapEntryCalculatedData* cdata) {
+ cdata->Dispose();
+}
+
HeapSnapshot::~HeapSnapshot() {
- delete sorted_entries_;
+ DeleteArray(raw_entries_);
+ calculated_data_.Iterate(DisposeCalculatedData);
}
-void HeapSnapshot::ClearPaint() {
- root_.ClearPaint();
- entries_.Apply(&HeapEntry::ClearPaint);
+void HeapSnapshot::AllocateEntries(int entries_count,
+ int children_count,
+ int retainers_count) {
+ ASSERT(raw_entries_ == NULL);
+ raw_entries_ = NewArray<char>(
+ HeapEntry::EntriesSize(entries_count, children_count, retainers_count));
}
-HeapEntry* HeapSnapshot::GetEntry(Object* obj) {
- if (!obj->IsHeapObject()) return NULL;
- HeapObject* object = HeapObject::cast(obj);
-
- {
- HeapEntry* existing = FindEntry(object);
- if (existing != NULL) return existing;
- }
-
- // Add new entry.
- if (object->IsJSFunction()) {
+HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
+ int children_count,
+ int retainers_count) {
+ if (object == kInternalRootObject) {
+ ASSERT(root_entry_index_ == -1);
+ root_entry_index_ = entries_.length();
+ HeapEntry* entry = GetNextEntryToInit();
+ entry->Init(this, children_count, retainers_count);
+ return entry;
+ } else if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(object);
SharedFunctionInfo* shared = func->shared();
String* name = String::cast(shared->name())->length() > 0 ?
String::cast(shared->name()) : shared->inferred_name();
- return AddEntry(object, HeapEntry::CLOSURE, collection_->GetName(name));
+ return AddEntry(object,
+ HeapEntry::kClosure,
+ collection_->GetName(name),
+ children_count,
+ retainers_count);
} else if (object->IsJSObject()) {
return AddEntry(object,
- HeapEntry::OBJECT,
+ HeapEntry::kObject,
collection_->GetName(
- JSObject::cast(object)->constructor_name()));
- } else if (object->IsJSGlobalPropertyCell()) {
- HeapEntry* value = GetEntry(JSGlobalPropertyCell::cast(object)->value());
- // If GPC references an object that we have interest in, add the object.
- // We don't store HeapEntries for GPCs. Instead, we make our hash map
- // to point to object's HeapEntry by GPCs address.
- if (value != NULL) AddEntryAlias(object, value);
- return value;
+ JSObject::cast(object)->constructor_name()),
+ children_count,
+ retainers_count);
} else if (object->IsString()) {
return AddEntry(object,
- HeapEntry::STRING,
- collection_->GetName(String::cast(object)));
+ HeapEntry::kString,
+ collection_->GetName(String::cast(object)),
+ children_count,
+ retainers_count);
} else if (object->IsCode()) {
- return AddEntry(object, HeapEntry::CODE);
+ return AddEntry(object,
+ HeapEntry::kCode,
+ "",
+ children_count,
+ retainers_count);
} else if (object->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
String* name = String::cast(shared->name())->length() > 0 ?
String::cast(shared->name()) : shared->inferred_name();
- return AddEntry(object, HeapEntry::CODE, collection_->GetName(name));
+ return AddEntry(object,
+ HeapEntry::kCode,
+ collection_->GetName(name),
+ children_count,
+ retainers_count);
} else if (object->IsScript()) {
Script* script = Script::cast(object);
return AddEntry(object,
- HeapEntry::CODE,
+ HeapEntry::kCode,
script->name()->IsString() ?
- collection_->GetName(String::cast(script->name())) : "");
+ collection_->GetName(String::cast(script->name())) : "",
+ children_count,
+ retainers_count);
} else if (object->IsFixedArray()) {
- return AddEntry(object, HeapEntry::ARRAY);
+ return AddEntry(object,
+ HeapEntry::kArray,
+ "",
+ children_count,
+ retainers_count);
}
// No interest in this object.
return NULL;
}
-void HeapSnapshot::SetClosureReference(HeapEntry* parent,
- String* reference_name,
- Object* child) {
- HeapEntry* child_entry = GetEntry(child);
- if (child_entry != NULL) {
- parent->SetClosureReference(
- collection_->GetName(reference_name), child_entry);
- }
+bool HeapSnapshot::WillAddEntry(HeapObject* object) {
+ return object == kInternalRootObject
+ || object->IsJSFunction()
+ || object->IsJSObject()
+ || object->IsString()
+ || object->IsCode()
+ || object->IsSharedFunctionInfo()
+ || object->IsScript()
+ || object->IsFixedArray();
}
-void HeapSnapshot::SetElementReference(HeapEntry* parent,
- int index,
- Object* child) {
- HeapEntry* child_entry = GetEntry(child);
- if (child_entry != NULL) {
- parent->SetElementReference(index, child_entry);
- }
+static void HeapEntryClearPaint(HeapEntry** entry_ptr) {
+ (*entry_ptr)->clear_paint();
}
-
-void HeapSnapshot::SetInternalReference(HeapEntry* parent,
- const char* reference_name,
- Object* child) {
- HeapEntry* child_entry = GetEntry(child);
- if (child_entry != NULL) {
- parent->SetInternalReference(reference_name, child_entry);
- }
+void HeapSnapshot::ClearPaint() {
+ entries_.Iterate(HeapEntryClearPaint);
}
-void HeapSnapshot::SetPropertyReference(HeapEntry* parent,
- String* reference_name,
- Object* child) {
- HeapEntry* child_entry = GetEntry(child);
- if (child_entry != NULL) {
- parent->SetPropertyReference(
- collection_->GetName(reference_name), child_entry);
- }
+int HeapSnapshot::AddCalculatedData() {
+ calculated_data_.Add(HeapEntryCalculatedData());
+ return calculated_data_.length() - 1;
}
HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
HeapEntry::Type type,
- const char* name) {
- HeapEntry* entry = new HeapEntry(this,
- type,
- name,
- collection_->GetObjectId(object->address()),
- GetObjectSize(object),
- GetObjectSecurityToken(object));
- entries_.Pair(object, entry);
-
- // Detect, if this is a JS global object of the current context, and
- // add it to snapshot's roots. There can be several JS global objects
- // in a context.
- if (object->IsJSGlobalProxy()) {
- int global_security_token = GetGlobalSecurityToken();
- int object_security_token =
- collection_->token_enumerator()->GetTokenId(
- Context::cast(
- JSGlobalProxy::cast(object)->context())->security_token());
- if (object_security_token == TokenEnumerator::kNoSecurityToken
- || object_security_token == global_security_token) {
- HeapEntry* global_object_entry =
- GetEntry(HeapObject::cast(object->map()->prototype()));
- ASSERT(global_object_entry != NULL);
- root_.SetAutoIndexReference(global_object_entry);
- }
- }
-
+ const char* name,
+ int children_count,
+ int retainers_count) {
+ HeapEntry* entry = GetNextEntryToInit();
+ entry->Init(this,
+ type,
+ name,
+ collection_->GetObjectId(object->address()),
+ GetObjectSize(object),
+ children_count,
+ retainers_count);
return entry;
}
-class EdgesCutter {
- public:
- explicit EdgesCutter(int global_security_token)
- : global_security_token_(global_security_token) {
- }
-
- void Apply(HeapEntry* entry) {
- if (entry->security_token_id() != TokenEnumerator::kNoSecurityToken
- && entry->security_token_id() != global_security_token_) {
- entry->CutEdges();
- }
+HeapEntry* HeapSnapshot::GetNextEntryToInit() {
+ if (entries_.length() > 0) {
+ HeapEntry* last_entry = entries_.last();
+ entries_.Add(reinterpret_cast<HeapEntry*>(
+ reinterpret_cast<char*>(last_entry) + last_entry->EntrySize()));
+ } else {
+ entries_.Add(reinterpret_cast<HeapEntry*>(raw_entries_));
}
-
- private:
- const int global_security_token_;
-};
-
-void HeapSnapshot::CutObjectsFromForeignSecurityContexts() {
- EdgesCutter cutter(GetGlobalSecurityToken());
- entries_.Apply(&cutter);
-}
-
-
-int HeapSnapshot::GetGlobalSecurityToken() {
- return collection_->token_enumerator()->GetTokenId(
- Top::context()->global()->global_context()->security_token());
+ return entries_.last();
}
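With EntriesSize() and GetNextEntryToInit(), a whole snapshot lives in one
contiguous allocation. As a hedged back-of-the-envelope check using the
64-bit constants asserted earlier (HeapEntry 40 bytes, HeapGraphEdge 24
bytes, pointers 8 bytes): a snapshot of 1,000 entries with 3,000 child edges
and 3,000 retainer slots needs

  40*1,000 + 24*3,000 + 8*3,000 = 136,000 bytes (about 133 KB)

allocated once, where the previous design paid a heap allocation per edge.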
@@ -1433,24 +1375,14 @@ int HeapSnapshot::GetObjectSize(HeapObject* obj) {
}
-int HeapSnapshot::GetObjectSecurityToken(HeapObject* obj) {
- if (obj->IsGlobalContext()) {
- return collection_->token_enumerator()->GetTokenId(
- Context::cast(obj)->security_token());
- } else {
- return TokenEnumerator::kNoSecurityToken;
- }
-}
-
-
int HeapSnapshot::CalculateNetworkSize(JSObject* obj) {
int size = obj->Size();
// If 'properties' and 'elements' are non-empty (thus, non-shared),
// take their size into account.
- if (FixedArray::cast(obj->properties())->length() != 0) {
+ if (obj->properties() != Heap::empty_fixed_array()) {
size += obj->properties()->Size();
}
- if (FixedArray::cast(obj->elements())->length() != 0) {
+ if (obj->elements() != Heap::empty_fixed_array()) {
size += obj->elements()->Size();
}
// For functions, also account non-empty context and literals sizes.
@@ -1467,15 +1399,10 @@ int HeapSnapshot::CalculateNetworkSize(JSObject* obj) {
}
-class EntriesCollector {
- public:
- explicit EntriesCollector(List<HeapEntry*>* list) : list_(list) { }
- void Apply(HeapEntry* entry) {
- list_->Add(entry);
- }
- private:
- List<HeapEntry*>* list_;
-};
+HeapSnapshotsDiff* HeapSnapshot::CompareWith(HeapSnapshot* snapshot) {
+ return collection_->CompareSnapshots(this, snapshot);
+}
+
template<class T>
static int SortByIds(const T* entry1_ptr,
@@ -1485,22 +1412,16 @@ static int SortByIds(const T* entry1_ptr,
}
List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
- if (sorted_entries_ != NULL) return sorted_entries_;
- sorted_entries_ = new List<HeapEntry*>(entries_.capacity());
- EntriesCollector collector(sorted_entries_);
- entries_.Apply(&collector);
- sorted_entries_->Sort(SortByIds);
- return sorted_entries_;
-}
-
-
-HeapSnapshotsDiff* HeapSnapshot::CompareWith(HeapSnapshot* snapshot) {
- return collection_->CompareSnapshots(this, snapshot);
+ if (!entries_sorted_) {
+ entries_.Sort(SortByIds);
+ entries_sorted_ = true;
+ }
+ return &entries_;
}
void HeapSnapshot::Print(int max_depth) {
- root_.Print(max_depth, 0);
+ root()->Print(max_depth, 0);
}
@@ -1571,6 +1492,7 @@ uint64_t HeapObjectsMap::FindEntry(Address addr) {
void HeapObjectsMap::RemoveDeadEntries() {
List<EntryInfo>* new_entries = new List<EntryInfo>();
+ List<void*> dead_entries;
for (HashMap::Entry* entry = entries_map_.Start();
entry != NULL;
entry = entries_map_.Next(entry)) {
@@ -1580,8 +1502,15 @@ void HeapObjectsMap::RemoveDeadEntries() {
if (entry_info.accessed) {
entry->value = reinterpret_cast<void*>(new_entries->length());
new_entries->Add(EntryInfo(entry_info.id, false));
+ } else {
+ dead_entries.Add(entry->key);
}
}
+ for (int i = 0; i < dead_entries.length(); ++i) {
+ void* raw_entry = dead_entries[i];
+ entries_map_.Remove(
+ raw_entry, AddressHash(reinterpret_cast<Address>(raw_entry)));
+ }
delete entries_;
entries_ = new_entries;
}
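The added dead_entries pass appears to fix a staleness issue as well as a
leak: previously the keys of collected objects stayed in entries_map_ while
their values indexed into the rebuilt list, so a recycled address could
resolve to a stale id.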
@@ -1635,53 +1564,343 @@ HeapSnapshotsDiff* HeapSnapshotsCollection::CompareSnapshots(
}
+HeapEntriesMap::HeapEntriesMap()
+ : entries_(HeapObjectsMatch),
+ entries_count_(0),
+ total_children_count_(0),
+ total_retainers_count_(0) {
+}
+
+
+HeapEntriesMap::~HeapEntriesMap() {
+ for (HashMap::Entry* p = entries_.Start(); p != NULL; p = entries_.Next(p)) {
+ if (!IsAlias(p->value)) delete reinterpret_cast<EntryInfo*>(p->value);
+ }
+}
+
+
+void HeapEntriesMap::Alias(HeapObject* from, HeapObject* to) {
+ HashMap::Entry* from_cache_entry = entries_.Lookup(from, Hash(from), true);
+ HashMap::Entry* to_cache_entry = entries_.Lookup(to, Hash(to), false);
+ if (from_cache_entry->value == NULL) {
+ ASSERT(to_cache_entry != NULL);
+ from_cache_entry->value = MakeAlias(to_cache_entry->value);
+ }
+}
+
+
+HeapEntry* HeapEntriesMap::Map(HeapObject* object) {
+ HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), false);
+ if (cache_entry != NULL) {
+ EntryInfo* entry_info =
+ reinterpret_cast<EntryInfo*>(Unalias(cache_entry->value));
+ return entry_info->entry;
+ } else {
+ return NULL;
+ }
+}
+
+
+void HeapEntriesMap::Pair(HeapObject* object, HeapEntry* entry) {
+ HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), true);
+ ASSERT(cache_entry->value == NULL);
+ cache_entry->value = new EntryInfo(entry);
+ ++entries_count_;
+}
+
+
+void HeapEntriesMap::CountReference(HeapObject* from, HeapObject* to,
+ int* prev_children_count,
+ int* prev_retainers_count) {
+ HashMap::Entry* from_cache_entry = entries_.Lookup(from, Hash(from), true);
+ HashMap::Entry* to_cache_entry = entries_.Lookup(to, Hash(to), false);
+ ASSERT(from_cache_entry != NULL);
+ ASSERT(to_cache_entry != NULL);
+ EntryInfo* from_entry_info =
+ reinterpret_cast<EntryInfo*>(Unalias(from_cache_entry->value));
+ EntryInfo* to_entry_info =
+ reinterpret_cast<EntryInfo*>(Unalias(to_cache_entry->value));
+ if (prev_children_count)
+ *prev_children_count = from_entry_info->children_count;
+ if (prev_retainers_count)
+ *prev_retainers_count = to_entry_info->retainers_count;
+ ++from_entry_info->children_count;
+ ++to_entry_info->retainers_count;
+ ++total_children_count_;
+ ++total_retainers_count_;
+}
+
+
+template<class Visitor>
+void HeapEntriesMap::UpdateEntries(Visitor* visitor) {
+ for (HashMap::Entry* p = entries_.Start();
+ p != NULL;
+ p = entries_.Next(p)) {
+ if (!IsAlias(p->value)) {
+ EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(p->value);
+ entry_info->entry = visitor->GetEntry(
+ reinterpret_cast<HeapObject*>(p->key),
+ entry_info->children_count,
+ entry_info->retainers_count);
+ entry_info->children_count = 0;
+ entry_info->retainers_count = 0;
+ }
+ }
+}
+
+
HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot)
- : snapshot_(snapshot) {
+ : snapshot_(snapshot),
+ collection_(snapshot->collection()),
+ filler_(NULL) {
}
+HeapEntry *const
+HeapSnapshotGenerator::SnapshotFillerInterface::kHeapEntryPlaceholder =
+ reinterpret_cast<HeapEntry*>(1);
+
+class SnapshotCounter : public HeapSnapshotGenerator::SnapshotFillerInterface {
+ public:
+ explicit SnapshotCounter(HeapEntriesMap* entries)
+ : entries_(entries) { }
+ HeapEntry* AddEntry(HeapObject* obj) {
+ entries_->Pair(obj, kHeapEntryPlaceholder);
+ return kHeapEntryPlaceholder;
+ }
+ void SetElementReference(HeapObject* parent_obj,
+ HeapEntry*,
+ int,
+ Object* child_obj,
+ HeapEntry*) {
+ entries_->CountReference(parent_obj, HeapObject::cast(child_obj));
+ }
+ void SetNamedReference(HeapGraphEdge::Type,
+ HeapObject* parent_obj,
+ HeapEntry*,
+ const char*,
+ Object* child_obj,
+ HeapEntry*) {
+ entries_->CountReference(parent_obj, HeapObject::cast(child_obj));
+ }
+ void SetRootReference(Object* child_obj, HeapEntry*) {
+ entries_->CountReference(
+ HeapSnapshot::kInternalRootObject, HeapObject::cast(child_obj));
+ }
+ private:
+ HeapEntriesMap* entries_;
+};
+
+
+class SnapshotFiller : public HeapSnapshotGenerator::SnapshotFillerInterface {
+ public:
+ explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
+ : snapshot_(snapshot),
+ collection_(snapshot->collection()),
+ entries_(entries) { }
+ HeapEntry* AddEntry(HeapObject* obj) {
+ UNREACHABLE();
+ return NULL;
+ }
+ void SetElementReference(HeapObject* parent_obj,
+ HeapEntry* parent_entry,
+ int index,
+ Object* child_obj,
+ HeapEntry* child_entry) {
+ int child_index, retainer_index;
+ entries_->CountReference(parent_obj, HeapObject::cast(child_obj),
+ &child_index, &retainer_index);
+ parent_entry->SetElementReference(
+ child_index, index, child_entry, retainer_index);
+ }
+ void SetNamedReference(HeapGraphEdge::Type type,
+ HeapObject* parent_obj,
+ HeapEntry* parent_entry,
+ const char* reference_name,
+ Object* child_obj,
+ HeapEntry* child_entry) {
+ int child_index, retainer_index;
+ entries_->CountReference(parent_obj, HeapObject::cast(child_obj),
+ &child_index, &retainer_index);
+ parent_entry->SetNamedReference(type,
+ child_index,
+ reference_name,
+ child_entry,
+ retainer_index);
+ }
+ void SetRootReference(Object* child_obj, HeapEntry* child_entry) {
+ int child_index, retainer_index;
+ entries_->CountReference(
+ HeapSnapshot::kInternalRootObject, HeapObject::cast(child_obj),
+ &child_index, &retainer_index);
+ snapshot_->root()->SetElementReference(
+ child_index, child_index + 1, child_entry, retainer_index);
+ }
+ private:
+ HeapSnapshot* snapshot_;
+ HeapSnapshotsCollection* collection_;
+ HeapEntriesMap* entries_;
+};
+
+class SnapshotAllocator {
+ public:
+ explicit SnapshotAllocator(HeapSnapshot* snapshot)
+ : snapshot_(snapshot) { }
+ HeapEntry* GetEntry(
+ HeapObject* obj, int children_count, int retainers_count) {
+ HeapEntry* entry =
+ snapshot_->AddEntry(obj, children_count, retainers_count);
+ ASSERT(entry != NULL);
+ return entry;
+ }
+ private:
+ HeapSnapshot* snapshot_;
+};
+
void HeapSnapshotGenerator::GenerateSnapshot() {
AssertNoAllocation no_alloc;
- // Iterate heap contents.
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ // Pass 1. Iterate heap contents to count entries and references.
+ SnapshotCounter counter(&entries_);
+ filler_ = &counter;
+ filler_->AddEntry(HeapSnapshot::kInternalRootObject);
+ HeapIterator iterator1;
+ for (HeapObject* obj = iterator1.next();
+ obj != NULL;
+ obj = iterator1.next()) {
ExtractReferences(obj);
}
- snapshot_->CutObjectsFromForeignSecurityContexts();
+ // Allocate and fill entries in the snapshot, allocate references.
+ snapshot_->AllocateEntries(entries_.entries_count(),
+ entries_.total_children_count(),
+ entries_.total_retainers_count());
+ SnapshotAllocator allocator(snapshot_);
+ entries_.UpdateEntries(&allocator);
+
+ // Pass 2. Fill references.
+ SnapshotFiller filler(snapshot_, &entries_);
+ filler_ = &filler;
+ HeapIterator iterator2;
+ for (HeapObject* obj = iterator2.next();
+ obj != NULL;
+ obj = iterator2.next()) {
+ ExtractReferences(obj);
+ }
+}
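
GenerateSnapshot runs the same ExtractReferences traversal twice and only
swaps the filler_ strategy: SnapshotCounter tallies entries, children and
retainers so AllocateEntries can reserve exact storage, then SnapshotFiller
writes the real references into it. A toy sketch of the count-then-fill
pattern (illustrative names, not V8 API):

    #include <cstdio>
    #include <utility>
    #include <vector>

    // The traversal is written once and parameterized by what happens
    // at each edge, mirroring SnapshotFillerInterface.
    struct Filler {
      virtual ~Filler() {}
      virtual void OnEdge(int from, int to) = 0;
    };

    // Pass 1: count only, so storage can be sized exactly.
    struct Counter : public Filler {
      int edges;
      Counter() : edges(0) {}
      virtual void OnEdge(int, int) { ++edges; }
    };

    // Pass 2: write edges into preallocated storage.
    struct Writer : public Filler {
      explicit Writer(std::vector<std::pair<int, int> >* out) : out_(out) {}
      virtual void OnEdge(int from, int to) {
        out_->push_back(std::make_pair(from, to));
      }
      std::vector<std::pair<int, int> >* out_;
    };

    static void Traverse(const std::vector<std::vector<int> >& graph,
                         Filler* filler) {
      for (size_t from = 0; from < graph.size(); ++from) {
        for (size_t e = 0; e < graph[from].size(); ++e) {
          filler->OnEdge(static_cast<int>(from), graph[from][e]);
        }
      }
    }

    int main() {
      std::vector<std::vector<int> > graph(3);
      graph[0].push_back(1); graph[0].push_back(2);
      graph[1].push_back(2);
      Counter counter;
      Traverse(graph, &counter);                 // pass 1: just count
      std::vector<std::pair<int, int> > edges;
      edges.reserve(counter.edges);              // exact preallocation
      Writer writer(&edges);
      Traverse(graph, &writer);                  // pass 2: fill
      std::printf("%d edges\n", static_cast<int>(edges.size()));
      return 0;
    }
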
+
+
+HeapEntry* HeapSnapshotGenerator::GetEntry(Object* obj) {
+ if (!obj->IsHeapObject()) return NULL;
+ HeapObject* object = HeapObject::cast(obj);
+ HeapEntry* entry = entries_.Map(object);
+
+ // A new entry.
+ if (entry == NULL) {
+ if (obj->IsJSGlobalPropertyCell()) {
+ Object* cell_target = JSGlobalPropertyCell::cast(obj)->value();
+ entry = GetEntry(cell_target);
+      // If the GPC references an object that we are interested in
+      // (see HeapSnapshot::AddEntry, WillAddEntry), add that object.
+      // We don't store HeapEntries for GPCs. Instead, we make our
+      // hash map point to the object's HeapEntry via the GPC's address.
+ if (entry != NULL) {
+ entries_.Alias(object, HeapObject::cast(cell_target));
+ }
+ return entry;
+ }
+
+ if (snapshot_->WillAddEntry(object)) entry = filler_->AddEntry(object);
+ }
+
+ return entry;
+}
+
+
+int HeapSnapshotGenerator::GetGlobalSecurityToken() {
+ return collection_->token_enumerator()->GetTokenId(
+ Top::context()->global()->global_context()->security_token());
+}
+
+
+int HeapSnapshotGenerator::GetObjectSecurityToken(HeapObject* obj) {
+ if (obj->IsGlobalContext()) {
+ return collection_->token_enumerator()->GetTokenId(
+ Context::cast(obj)->security_token());
+ } else {
+ return TokenEnumerator::kNoSecurityToken;
+ }
}
+class IndexedReferencesExtractor : public ObjectVisitor {
+ public:
+ IndexedReferencesExtractor(HeapSnapshotGenerator* generator,
+ HeapObject* parent_obj,
+ HeapEntry* parent_entry)
+ : generator_(generator),
+ parent_obj_(parent_obj),
+ parent_(parent_entry),
+ next_index_(1) {
+ }
+
+ void VisitPointer(Object** o) {
+ generator_->SetElementReference(parent_obj_, parent_, next_index_++, *o);
+ }
+
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) VisitPointer(p);
+ }
+
+ private:
+ HeapSnapshotGenerator* generator_;
+ HeapObject* parent_obj_;
+ HeapEntry* parent_;
+ int next_index_;
+};
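
IndexedReferencesExtractor forwards every pointer slot of an object's body
to SetElementReference, numbering the slots from 1 as it goes. The same
shape as a standalone visitor (illustrative):

    #include <cstdio>

    // Forwards every pointer-sized slot in an object body to a callback,
    // numbering slots from 1, as IndexedReferencesExtractor does.
    class SlotVisitor {
     public:
      SlotVisitor() : next_index_(1) {}
      void VisitPointer(void** slot) {
        std::printf("slot %d -> %p\n", next_index_++, *slot);
      }
      void VisitPointers(void** start, void** end) {
        for (void** p = start; p < end; ++p) VisitPointer(p);
      }
     private:
      int next_index_;
    };

    int main() {
      void* body[3];
      body[0] = &body[0];
      body[1] = &body[1];
      body[2] = 0;
      SlotVisitor v;
      v.VisitPointers(body, body + 3);
      return 0;
    }
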
+
+
void HeapSnapshotGenerator::ExtractReferences(HeapObject* obj) {
- HeapEntry* entry = snapshot_->GetEntry(obj);
- if (entry == NULL) return;
- if (entry->visited()) return;
+  // We need to reference JS global objects from the snapshot's root.
+  // We also need to include only global objects from the current
+  // security context, and we don't want to add the global proxy,
+  // as we don't have a special type for it.
+ if (obj->IsJSGlobalProxy()) {
+ int global_security_token = GetGlobalSecurityToken();
+ JSGlobalProxy* proxy = JSGlobalProxy::cast(obj);
+ int object_security_token =
+ collection_->token_enumerator()->GetTokenId(
+ Context::cast(proxy->context())->security_token());
+ if (object_security_token == TokenEnumerator::kNoSecurityToken
+ || object_security_token == global_security_token) {
+ SetRootReference(proxy->map()->prototype());
+ }
+ return;
+ }
+
+ HeapEntry* entry = GetEntry(obj);
+ if (entry == NULL) return; // No interest in this object.
if (obj->IsJSObject()) {
JSObject* js_obj = JSObject::cast(obj);
ExtractClosureReferences(js_obj, entry);
ExtractPropertyReferences(js_obj, entry);
ExtractElementReferences(js_obj, entry);
- snapshot_->SetPropertyReference(
- entry, Heap::prototype_symbol(), js_obj->map()->prototype());
- } else if (obj->IsJSGlobalPropertyCell()) {
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(obj);
- snapshot_->SetElementReference(entry, 0, cell->value());
+ SetPropertyReference(
+ obj, entry, Heap::prototype_symbol(), js_obj->map()->prototype());
} else if (obj->IsString()) {
if (obj->IsConsString()) {
ConsString* cs = ConsString::cast(obj);
- snapshot_->SetElementReference(entry, 0, cs->first());
- snapshot_->SetElementReference(entry, 1, cs->second());
+ SetElementReference(obj, entry, 0, cs->first());
+ SetElementReference(obj, entry, 1, cs->second());
}
} else if (obj->IsCode() || obj->IsSharedFunctionInfo() || obj->IsScript()) {
- IndexedReferencesExtractor refs_extractor(snapshot_, entry);
+ IndexedReferencesExtractor refs_extractor(this, obj, entry);
obj->Iterate(&refs_extractor);
} else if (obj->IsFixedArray()) {
- IndexedReferencesExtractor refs_extractor(snapshot_, entry);
+ IndexedReferencesExtractor refs_extractor(this, obj, entry);
obj->Iterate(&refs_extractor);
}
- entry->MarkAsVisited();
}
@@ -1700,10 +1919,10 @@ void HeapSnapshotGenerator::ExtractClosureReferences(JSObject* js_obj,
String* local_name = *zone_scope_info.LocalName(i);
int idx = serialized_scope_info->ContextSlotIndex(local_name, NULL);
if (idx >= 0 && idx < context->length()) {
- snapshot_->SetClosureReference(entry, local_name, context->get(idx));
+ SetClosureReference(js_obj, entry, local_name, context->get(idx));
}
}
- snapshot_->SetInternalReference(entry, "code", func->shared());
+ SetInternalReference(js_obj, entry, "code", func->shared());
}
}
@@ -1716,13 +1935,13 @@ void HeapSnapshotGenerator::ExtractPropertyReferences(JSObject* js_obj,
switch (descs->GetType(i)) {
case FIELD: {
int index = descs->GetFieldIndex(i);
- snapshot_->SetPropertyReference(
- entry, descs->GetKey(i), js_obj->FastPropertyAt(index));
+ SetPropertyReference(
+ js_obj, entry, descs->GetKey(i), js_obj->FastPropertyAt(index));
break;
}
case CONSTANT_FUNCTION:
- snapshot_->SetPropertyReference(
- entry, descs->GetKey(i), descs->GetConstantFunction(i));
+ SetPropertyReference(
+ js_obj, entry, descs->GetKey(i), descs->GetConstantFunction(i));
break;
default: ;
}
@@ -1733,8 +1952,8 @@ void HeapSnapshotGenerator::ExtractPropertyReferences(JSObject* js_obj,
for (int i = 0; i < length; ++i) {
Object* k = dictionary->KeyAt(i);
if (dictionary->IsKey(k)) {
- snapshot_->SetPropertyReference(
- entry, String::cast(k), dictionary->ValueAt(i));
+ SetPropertyReference(
+ js_obj, entry, String::cast(k), dictionary->ValueAt(i));
}
}
}
@@ -1750,7 +1969,7 @@ void HeapSnapshotGenerator::ExtractElementReferences(JSObject* js_obj,
elements->length();
for (int i = 0; i < length; ++i) {
if (!elements->get(i)->IsTheHole()) {
- snapshot_->SetElementReference(entry, i, elements->get(i));
+ SetElementReference(js_obj, entry, i, elements->get(i));
}
}
} else if (js_obj->HasDictionaryElements()) {
@@ -1761,13 +1980,90 @@ void HeapSnapshotGenerator::ExtractElementReferences(JSObject* js_obj,
if (dictionary->IsKey(k)) {
ASSERT(k->IsNumber());
uint32_t index = static_cast<uint32_t>(k->Number());
- snapshot_->SetElementReference(entry, index, dictionary->ValueAt(i));
+ SetElementReference(js_obj, entry, index, dictionary->ValueAt(i));
}
}
}
}
+void HeapSnapshotGenerator::SetClosureReference(HeapObject* parent_obj,
+ HeapEntry* parent_entry,
+ String* reference_name,
+ Object* child_obj) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ filler_->SetNamedReference(HeapGraphEdge::kContextVariable,
+ parent_obj,
+ parent_entry,
+ collection_->GetName(reference_name),
+ child_obj,
+ child_entry);
+ }
+}
+
+
+void HeapSnapshotGenerator::SetElementReference(HeapObject* parent_obj,
+ HeapEntry* parent_entry,
+ int index,
+ Object* child_obj) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ filler_->SetElementReference(
+ parent_obj, parent_entry, index, child_obj, child_entry);
+ }
+}
+
+
+void HeapSnapshotGenerator::SetInternalReference(HeapObject* parent_obj,
+ HeapEntry* parent_entry,
+ const char* reference_name,
+ Object* child_obj) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ filler_->SetNamedReference(HeapGraphEdge::kInternal,
+ parent_obj,
+ parent_entry,
+ reference_name,
+ child_obj,
+ child_entry);
+ }
+}
+
+
+void HeapSnapshotGenerator::SetPropertyReference(HeapObject* parent_obj,
+ HeapEntry* parent_entry,
+ String* reference_name,
+ Object* child_obj) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ filler_->SetNamedReference(HeapGraphEdge::kProperty,
+ parent_obj,
+ parent_entry,
+ collection_->GetName(reference_name),
+ child_obj,
+ child_entry);
+ }
+}
+
+
+void HeapSnapshotGenerator::SetRootReference(Object* child_obj) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ ASSERT(child_entry != NULL);
+ filler_->SetRootReference(child_obj, child_entry);
+}
+
+
+void HeapSnapshotsDiff::CreateRoots(int additions_count, int deletions_count) {
+ raw_additions_root_ =
+ NewArray<char>(HeapEntry::EntriesSize(1, additions_count, 0));
+ additions_root()->Init(snapshot2_, additions_count, 0);
+ raw_deletions_root_ =
+ NewArray<char>(HeapEntry::EntriesSize(1, deletions_count, 0));
+ deletions_root()->Init(snapshot1_, deletions_count, 0);
+}
+
+
static void DeleteHeapSnapshotsDiff(HeapSnapshotsDiff** diff_ptr) {
delete *diff_ptr;
}
@@ -1779,8 +2075,6 @@ HeapSnapshotsComparator::~HeapSnapshotsComparator() {
HeapSnapshotsDiff* HeapSnapshotsComparator::Compare(HeapSnapshot* snapshot1,
HeapSnapshot* snapshot2) {
- HeapSnapshotsDiff* diff = new HeapSnapshotsDiff(snapshot1, snapshot2);
- diffs_.Add(diff);
List<HeapEntry*>* entries1 = snapshot1->GetSortedEntriesList();
List<HeapEntry*>* entries2 = snapshot2->GetSortedEntriesList();
int i = 0, j = 0;
@@ -1810,17 +2104,33 @@ HeapSnapshotsDiff* HeapSnapshotsComparator::Compare(HeapSnapshot* snapshot1,
snapshot1->ClearPaint();
snapshot1->root()->PaintAllReachable();
+ snapshot2->ClearPaint();
+ snapshot2->root()->PaintAllReachable();
+ int reachable_deleted_entries = 0, reachable_added_entries = 0;
+ for (int i = 0; i < deleted_entries.length(); ++i) {
+ HeapEntry* entry = deleted_entries[i];
+ if (entry->painted_reachable()) ++reachable_deleted_entries;
+ }
+ for (int i = 0; i < added_entries.length(); ++i) {
+ HeapEntry* entry = added_entries[i];
+ if (entry->painted_reachable()) ++reachable_added_entries;
+ }
+
+ HeapSnapshotsDiff* diff = new HeapSnapshotsDiff(snapshot1, snapshot2);
+ diffs_.Add(diff);
+ diff->CreateRoots(reachable_added_entries, reachable_deleted_entries);
+
+ int del_child_index = 0, deleted_entry_index = 1;
for (int i = 0; i < deleted_entries.length(); ++i) {
HeapEntry* entry = deleted_entries[i];
if (entry->painted_reachable())
- diff->AddDeletedEntry(entry);
+ diff->AddDeletedEntry(del_child_index++, deleted_entry_index++, entry);
}
- snapshot2->ClearPaint();
- snapshot2->root()->PaintAllReachable();
+ int add_child_index = 0, added_entry_index = 1;
for (int i = 0; i < added_entries.length(); ++i) {
HeapEntry* entry = added_entries[i];
if (entry->painted_reachable())
- diff->AddAddedEntry(entry);
+ diff->AddAddedEntry(add_child_index++, added_entry_index++, entry);
}
return diff;
}
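
Compare walks the two snapshots' entry lists, which GetSortedEntriesList
orders by object id, with two cursors; the merge body itself falls between
the hunks shown. Ids present only in snapshot1 become deletions, ids
present only in snapshot2 become additions, and each set is then filtered
by the reachability painting before the diff roots are sized. A sketch of
the two-cursor merge (illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Splits two id-sorted lists into (deleted, added); ids present in
    // both snapshots denote the same surviving object and are skipped.
    static void DiffSortedIds(const std::vector<uint64_t>& ids1,
                              const std::vector<uint64_t>& ids2,
                              std::vector<uint64_t>* deleted,
                              std::vector<uint64_t>* added) {
      size_t i = 0, j = 0;
      while (i < ids1.size() && j < ids2.size()) {
        if (ids1[i] == ids2[j]) {
          ++i; ++j;                            // unchanged object
        } else if (ids1[i] < ids2[j]) {
          deleted->push_back(ids1[i++]);       // only in snapshot1
        } else {
          added->push_back(ids2[j++]);         // only in snapshot2
        }
      }
      while (i < ids1.size()) deleted->push_back(ids1[i++]);
      while (j < ids2.size()) added->push_back(ids2[j++]);
    }
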
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index cd2bd0b62..bebf40a37 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -279,15 +279,12 @@ class CpuProfilesCollection {
CpuProfile* StopProfiling(int security_token_id,
const char* title,
double actual_sampling_rate);
- CpuProfile* StopProfiling(int security_token_id,
- String* title,
- double actual_sampling_rate);
List<CpuProfile*>* Profiles(int security_token_id);
const char* GetName(String* name) {
return function_and_resource_names_.GetName(name);
}
CpuProfile* GetProfile(int security_token_id, unsigned uid);
- inline bool is_last_profile();
+ bool IsLastProfile(const char* title);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
String* name, String* resource_name, int line_number);
@@ -423,167 +420,194 @@ class ProfileGenerator {
};
-class HeapSnapshot;
class HeapEntry;
-
-class HeapGraphEdge {
+class HeapGraphEdge BASE_EMBEDDED {
public:
enum Type {
- CONTEXT_VARIABLE = v8::HeapGraphEdge::CONTEXT_VARIABLE,
- ELEMENT = v8::HeapGraphEdge::ELEMENT,
- PROPERTY = v8::HeapGraphEdge::PROPERTY,
- INTERNAL = v8::HeapGraphEdge::INTERNAL
+ kContextVariable = v8::HeapGraphEdge::kContextVariable,
+ kElement = v8::HeapGraphEdge::kElement,
+ kProperty = v8::HeapGraphEdge::kProperty,
+ kInternal = v8::HeapGraphEdge::kInternal
};
- HeapGraphEdge(Type type, const char* name, HeapEntry* from, HeapEntry* to);
- HeapGraphEdge(int index, HeapEntry* from, HeapEntry* to);
+ HeapGraphEdge() { }
+ void Init(int child_index, Type type, const char* name, HeapEntry* to);
+ void Init(int child_index, int index, HeapEntry* to);
- Type type() const { return type_; }
- int index() const {
- ASSERT(type_ == ELEMENT);
+ Type type() { return static_cast<Type>(type_); }
+ int index() {
+ ASSERT(type_ == kElement);
return index_;
}
- const char* name() const {
- ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY || type_ == INTERNAL);
+ const char* name() {
+ ASSERT(type_ == kContextVariable
+ || type_ == kProperty
+ || type_ == kInternal);
return name_;
}
- HeapEntry* from() const { return from_; }
- HeapEntry* to() const { return to_; }
+ HeapEntry* to() { return to_; }
+
+ HeapEntry* From();
private:
- Type type_;
+ int child_index_ : 30;
+ unsigned type_ : 2;
union {
int index_;
const char* name_;
};
- HeapEntry* from_;
HeapEntry* to_;
DISALLOW_COPY_AND_ASSIGN(HeapGraphEdge);
};
-class HeapGraphPath;
class CachedHeapGraphPath;
+class HeapGraphPath;
+class HeapSnapshot;
-class HeapEntry {
+// HeapEntry instances represent an entity from the heap (or a special
+// virtual node, e.g. root). To make heap snapshots more compact,
+// HeapEntries have a special memory layout (no Vectors or Lists used):
+//
+//  +-----------------+
+//  |    HeapEntry    |
+//  +-----------------+
+//  |  HeapGraphEdge  |  \
+//  |       ...       |   } children_count
+//  |  HeapGraphEdge  |  /
+//  +-----------------+
+//  | HeapGraphEdge*  |  \
+//  |       ...       |   } retainers_count
+//  | HeapGraphEdge*  |  /
+//  +-----------------+
+//
+// In a HeapSnapshot, all entries are hand-allocated in a contiguous array
+// of raw bytes.
+//
+class HeapEntry BASE_EMBEDDED {
public:
enum Type {
- INTERNAL = v8::HeapGraphNode::INTERNAL,
- ARRAY = v8::HeapGraphNode::ARRAY,
- STRING = v8::HeapGraphNode::STRING,
- OBJECT = v8::HeapGraphNode::OBJECT,
- CODE = v8::HeapGraphNode::CODE,
- CLOSURE = v8::HeapGraphNode::CLOSURE
+ kInternal = v8::HeapGraphNode::kInternal,
+ kArray = v8::HeapGraphNode::kArray,
+ kString = v8::HeapGraphNode::kString,
+ kObject = v8::HeapGraphNode::kObject,
+ kCode = v8::HeapGraphNode::kCode,
+ kClosure = v8::HeapGraphNode::kClosure
};
- explicit HeapEntry(HeapSnapshot* snapshot)
- : snapshot_(snapshot),
- visited_(false),
- type_(INTERNAL),
- name_(""),
- id_(0),
- next_auto_index_(0),
- self_size_(0),
- security_token_id_(TokenEnumerator::kNoSecurityToken),
- children_(1),
- retainers_(0),
- retaining_paths_(0),
- total_size_(kUnknownSize),
- non_shared_total_size_(kUnknownSize),
- painted_(kUnpainted) { }
- HeapEntry(HeapSnapshot* snapshot,
+ HeapEntry() { }
+ void Init(HeapSnapshot* snapshot, int children_count, int retainers_count);
+ void Init(HeapSnapshot* snapshot,
Type type,
const char* name,
uint64_t id,
int self_size,
- int security_token_id)
- : snapshot_(snapshot),
- visited_(false),
- type_(type),
- name_(name),
- id_(id),
- next_auto_index_(1),
- self_size_(self_size),
- security_token_id_(security_token_id),
- children_(4),
- retainers_(4),
- retaining_paths_(4),
- total_size_(kUnknownSize),
- non_shared_total_size_(kUnknownSize),
- painted_(kUnpainted) { }
- ~HeapEntry();
-
- bool visited() const { return visited_; }
- Type type() const { return type_; }
- const char* name() const { return name_; }
- uint64_t id() const { return id_; }
- int self_size() const { return self_size_; }
- int security_token_id() const { return security_token_id_; }
- bool painted_reachable() { return painted_ == kPaintReachable; }
+ int children_count,
+ int retainers_count);
+
+ HeapSnapshot* snapshot() { return snapshot_; }
+ Type type() { return static_cast<Type>(type_); }
+ const char* name() { return name_; }
+ uint64_t id() { return id_; }
+ int self_size() { return self_size_; }
+
+ Vector<HeapGraphEdge> children() {
+ return Vector<HeapGraphEdge>(children_arr(), children_count_); }
+ Vector<HeapGraphEdge*> retainers() {
+ return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_); }
+ List<HeapGraphPath*>* GetRetainingPaths();
+
+ void clear_paint() { painted_ = kUnpainted; }
+ bool painted_reachable() { return painted_ == kPainted; }
+ void paint_reachable() {
+ ASSERT(painted_ == kUnpainted);
+ painted_ = kPainted;
+ }
bool not_painted_reachable_from_others() {
- return painted_ != kPaintReachableFromOthers;
+ return painted_ != kPaintedReachableFromOthers;
+ }
+ void paint_reachable_from_others() {
+ painted_ = kPaintedReachableFromOthers;
}
- const List<HeapGraphEdge*>* children() const { return &children_; }
- const List<HeapGraphEdge*>* retainers() const { return &retainers_; }
- const List<HeapGraphPath*>* GetRetainingPaths();
-
template<class Visitor>
void ApplyAndPaintAllReachable(Visitor* visitor);
-
- void ClearPaint() { painted_ = kUnpainted; }
- void CutEdges();
- void MarkAsVisited() { visited_ = true; }
void PaintAllReachable();
- void PaintReachable() {
- ASSERT(painted_ == kUnpainted);
- painted_ = kPaintReachable;
- }
- void PaintReachableFromOthers() { painted_ = kPaintReachableFromOthers; }
- void SetClosureReference(const char* name, HeapEntry* entry);
- void SetElementReference(int index, HeapEntry* entry);
- void SetInternalReference(const char* name, HeapEntry* entry);
- void SetPropertyReference(const char* name, HeapEntry* entry);
- void SetAutoIndexReference(HeapEntry* entry);
- void SetUnidirAutoIndexReference(HeapEntry* entry);
- int TotalSize();
- int NonSharedTotalSize();
+ void SetElementReference(
+ int child_index, int index, HeapEntry* entry, int retainer_index);
+ void SetNamedReference(HeapGraphEdge::Type type,
+ int child_index,
+ const char* name,
+ HeapEntry* entry,
+ int retainer_index);
+ void SetUnidirElementReference(int child_index, int index, HeapEntry* entry);
+
+ int EntrySize() { return EntriesSize(1, children_count_, retainers_count_); }
+ int ReachableSize();
+ int RetainedSize();
void Print(int max_depth, int indent);
- private:
- void AddEdge(HeapGraphEdge* edge);
- int CalculateTotalSize();
- int CalculateNonSharedTotalSize();
- void FindRetainingPaths(HeapEntry* node, CachedHeapGraphPath* prev_path);
- void RemoveChild(HeapGraphEdge* edge);
- void RemoveRetainer(HeapGraphEdge* edge);
+ static int EntriesSize(int entries_count,
+ int children_count,
+ int retainers_count);
+ private:
+ HeapGraphEdge* children_arr() {
+ return reinterpret_cast<HeapGraphEdge*>(this + 1);
+ }
+ HeapGraphEdge** retainers_arr() {
+ return reinterpret_cast<HeapGraphEdge**>(children_arr() + children_count_);
+ }
const char* TypeAsString();
+ unsigned painted_: 2;
+ unsigned type_: 3;
+ // The calculated data is stored in HeapSnapshot in HeapEntryCalculatedData
+ // entries. See AddCalculatedData and GetCalculatedData.
+ int calculated_data_index_: 27;
+ int self_size_;
+ int children_count_;
+ int retainers_count_;
HeapSnapshot* snapshot_;
- bool visited_;
- Type type_;
const char* name_;
uint64_t id_;
- int next_auto_index_;
- int self_size_;
- int security_token_id_;
- List<HeapGraphEdge*> children_;
- List<HeapGraphEdge*> retainers_;
- List<HeapGraphPath*> retaining_paths_;
- int total_size_;
- int non_shared_total_size_;
- int painted_;
+
+ static const unsigned kUnpainted = 0;
+ static const unsigned kPainted = 1;
+ static const unsigned kPaintedReachableFromOthers = 2;
+ static const int kNoCalculatedData = -1;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapEntry);
+};
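
children_arr() and retainers_arr() implement the layout pictured in the
comment above: every HeapEntry is followed in the raw entries array by its
own edge storage, reached by pointer arithmetic from this instead of
through a Vector or List header. A cut-down sketch of such a
trailing-array node, assuming (as holds here) that the header's alignment
suffices for the arrays:

    #include <cstddef>
    #include <cstdio>
    #include <new>

    struct Edge { int to; };

    // A node whose edge array lives immediately behind it in memory, as
    // the diagram above shows for HeapEntry.
    struct Node {
      int edge_count;
      Edge* edges() { return reinterpret_cast<Edge*>(this + 1); }
      static std::size_t AllocationSize(int edges) {
        return sizeof(Node) + edges * sizeof(Edge);
      }
    };

    int main() {
      char* raw = new char[Node::AllocationSize(2)];  // one raw block
      Node* n = new (raw) Node;                       // placement-construct
      n->edge_count = 2;
      n->edges()[0].to = 7;                           // trailing array
      n->edges()[1].to = 9;
      std::printf("%d %d\n", n->edges()[0].to, n->edges()[1].to);
      delete[] raw;  // Node and Edge are trivially destructible
      return 0;
    }
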
+
+
+class HeapEntryCalculatedData {
+ public:
+ HeapEntryCalculatedData()
+ : retaining_paths_(NULL),
+ reachable_size_(kUnknownSize),
+ retained_size_(kUnknownSize) {
+ }
+ void Dispose();
+
+ List<HeapGraphPath*>* GetRetainingPaths(HeapEntry* entry);
+ int ReachableSize(HeapEntry* entry);
+ int RetainedSize(HeapEntry* entry);
+
+ private:
+ void CalculateSizes(HeapEntry* entry);
+ void FindRetainingPaths(HeapEntry* entry, CachedHeapGraphPath* prev_path);
+
+ List<HeapGraphPath*>* retaining_paths_;
+ int reachable_size_;
+ int retained_size_;
static const int kUnknownSize = -1;
- static const int kUnpainted = 0;
- static const int kPaintReachable = 1;
- static const int kPaintReachableFromOthers = 2;
- DISALLOW_IMPLICIT_CONSTRUCTORS(HeapEntry);
+ // Allow generated copy constructor and assignment operator.
};
@@ -595,7 +619,7 @@ class HeapGraphPath {
void Add(HeapGraphEdge* edge) { path_.Add(edge); }
void Set(int index, HeapGraphEdge* edge) { path_[index] = edge; }
- const List<HeapGraphEdge*>* path() const { return &path_; }
+ const List<HeapGraphEdge*>* path() { return &path_; }
void Print();
@@ -606,39 +630,6 @@ class HeapGraphPath {
};
-class HeapEntriesMap {
- public:
- HeapEntriesMap();
- ~HeapEntriesMap();
-
- void Alias(HeapObject* object, HeapEntry* entry);
- void Apply(void (HeapEntry::*Func)(void));
- template<class Visitor>
- void Apply(Visitor* visitor);
- HeapEntry* Map(HeapObject* object);
- void Pair(HeapObject* object, HeapEntry* entry);
-
- uint32_t capacity() { return entries_.capacity(); }
-
- private:
- INLINE(uint32_t Hash(HeapObject* object)) {
- return static_cast<uint32_t>(reinterpret_cast<intptr_t>(object));
- }
- INLINE(static bool HeapObjectsMatch(void* key1, void* key2)) {
- return key1 == key2;
- }
- INLINE(bool IsAlias(void* ptr)) {
- return reinterpret_cast<intptr_t>(ptr) & kAliasTag;
- }
-
- static const intptr_t kAliasTag = 1;
-
- HashMap entries_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap);
-};
-
-
class HeapSnapshotsCollection;
class HeapSnapshotsDiff;
@@ -653,53 +644,52 @@ class HeapSnapshot {
const char* title,
unsigned uid);
~HeapSnapshot();
- void ClearPaint();
- void CutObjectsFromForeignSecurityContexts();
- HeapEntry* GetEntry(Object* object);
- void SetClosureReference(
- HeapEntry* parent, String* reference_name, Object* child);
- void SetElementReference(HeapEntry* parent, int index, Object* child);
- void SetInternalReference(
- HeapEntry* parent, const char* reference_name, Object* child);
- void SetPropertyReference(
- HeapEntry* parent, String* reference_name, Object* child);
- INLINE(const char* title() const) { return title_; }
- INLINE(unsigned uid() const) { return uid_; }
- const HeapEntry* const_root() const { return &root_; }
- HeapEntry* root() { return &root_; }
- template<class Visitor>
- void IterateEntries(Visitor* visitor) { entries_.Apply(visitor); }
- List<HeapEntry*>* GetSortedEntriesList();
+ HeapSnapshotsCollection* collection() { return collection_; }
+ const char* title() { return title_; }
+ unsigned uid() { return uid_; }
+ HeapEntry* root() { return entries_[root_entry_index_]; }
+
+ void AllocateEntries(
+ int entries_count, int children_count, int retainers_count);
+ HeapEntry* AddEntry(
+ HeapObject* object, int children_count, int retainers_count);
+ bool WillAddEntry(HeapObject* object);
+ int AddCalculatedData();
+ HeapEntryCalculatedData& GetCalculatedData(int index) {
+ return calculated_data_[index];
+ }
+ void ClearPaint();
HeapSnapshotsDiff* CompareWith(HeapSnapshot* snapshot);
+ List<HeapEntry*>* GetSortedEntriesList();
+ template<class Visitor>
+ void IterateEntries(Visitor* visitor) { entries_.Iterate(visitor); }
void Print(int max_depth);
+ void PrintEntriesSize();
+
+ static HeapObject *const kInternalRootObject;
private:
- HeapEntry* AddEntry(HeapObject* object, HeapEntry::Type type) {
- return AddEntry(object, type, "");
- }
- HeapEntry* AddEntry(
- HeapObject* object, HeapEntry::Type type, const char* name);
- void AddEntryAlias(HeapObject* object, HeapEntry* entry) {
- entries_.Alias(object, entry);
- }
- HeapEntry* FindEntry(HeapObject* object) {
- return entries_.Map(object);
- }
- int GetGlobalSecurityToken();
- int GetObjectSecurityToken(HeapObject* obj);
+ HeapEntry* AddEntry(HeapObject* object,
+ HeapEntry::Type type,
+ const char* name,
+ int children_count,
+ int retainers_count);
+ HeapEntry* GetNextEntryToInit();
static int GetObjectSize(HeapObject* obj);
static int CalculateNetworkSize(JSObject* obj);
HeapSnapshotsCollection* collection_;
const char* title_;
unsigned uid_;
- HeapEntry root_;
- // Mapping from HeapObject* pointers to HeapEntry* pointers.
- HeapEntriesMap entries_;
- // Entries sorted by id.
- List<HeapEntry*>* sorted_entries_;
+ int root_entry_index_;
+ char* raw_entries_;
+ List<HeapEntry*> entries_;
+ bool entries_sorted_;
+ List<HeapEntryCalculatedData> calculated_data_;
+
+ friend class HeapSnapshotTester;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshot);
};
@@ -748,30 +738,36 @@ class HeapSnapshotsDiff {
HeapSnapshotsDiff(HeapSnapshot* snapshot1, HeapSnapshot* snapshot2)
: snapshot1_(snapshot1),
snapshot2_(snapshot2),
- additions_root_(new HeapEntry(snapshot2)),
- deletions_root_(new HeapEntry(snapshot1)) { }
+ raw_additions_root_(NULL),
+ raw_deletions_root_(NULL) { }
~HeapSnapshotsDiff() {
- delete deletions_root_;
- delete additions_root_;
+ DeleteArray(raw_deletions_root_);
+ DeleteArray(raw_additions_root_);
}
- void AddAddedEntry(HeapEntry* entry) {
- additions_root_->SetUnidirAutoIndexReference(entry);
+ void AddAddedEntry(int child_index, int index, HeapEntry* entry) {
+ additions_root()->SetUnidirElementReference(child_index, index, entry);
}
- void AddDeletedEntry(HeapEntry* entry) {
- deletions_root_->SetUnidirAutoIndexReference(entry);
+ void AddDeletedEntry(int child_index, int index, HeapEntry* entry) {
+ deletions_root()->SetUnidirElementReference(child_index, index, entry);
}
- const HeapEntry* additions_root() const { return additions_root_; }
- const HeapEntry* deletions_root() const { return deletions_root_; }
+ void CreateRoots(int additions_count, int deletions_count);
+
+ HeapEntry* additions_root() {
+ return reinterpret_cast<HeapEntry*>(raw_additions_root_);
+ }
+ HeapEntry* deletions_root() {
+ return reinterpret_cast<HeapEntry*>(raw_deletions_root_);
+ }
private:
HeapSnapshot* snapshot1_;
HeapSnapshot* snapshot2_;
- HeapEntry* additions_root_;
- HeapEntry* deletions_root_;
+ char* raw_additions_root_;
+ char* raw_deletions_root_;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsDiff);
};
@@ -830,18 +826,123 @@ class HeapSnapshotsCollection {
};
+// The HeapEntriesMap instance is used to track a mapping between
+// real heap objects and their representations in heap snapshots.
+class HeapEntriesMap {
+ public:
+ HeapEntriesMap();
+ ~HeapEntriesMap();
+
+ // Aliasing is used for skipping intermediate proxy objects, like
+ // JSGlobalPropertyCell.
+ void Alias(HeapObject* from, HeapObject* to);
+ HeapEntry* Map(HeapObject* object);
+ void Pair(HeapObject* object, HeapEntry* entry);
+ void CountReference(HeapObject* from, HeapObject* to,
+ int* prev_children_count = NULL,
+ int* prev_retainers_count = NULL);
+ template<class Visitor>
+ void UpdateEntries(Visitor* visitor);
+
+ int entries_count() { return entries_count_; }
+ int total_children_count() { return total_children_count_; }
+ int total_retainers_count() { return total_retainers_count_; }
+
+ private:
+ struct EntryInfo {
+ explicit EntryInfo(HeapEntry* entry)
+ : entry(entry), children_count(0), retainers_count(0) { }
+ HeapEntry* entry;
+ int children_count;
+ int retainers_count;
+ };
+
+ uint32_t Hash(HeapObject* object) {
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(object));
+ }
+ static bool HeapObjectsMatch(void* key1, void* key2) { return key1 == key2; }
+
+ bool IsAlias(void* ptr) {
+ return reinterpret_cast<intptr_t>(ptr) & kAliasTag;
+ }
+ void* MakeAlias(void* ptr) {
+ return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(ptr) | kAliasTag);
+ }
+ void* Unalias(void* ptr) {
+ return reinterpret_cast<void*>(
+ reinterpret_cast<intptr_t>(ptr) & (~kAliasTag));
+ }
+
+ HashMap entries_;
+ int entries_count_;
+ int total_children_count_;
+ int total_retainers_count_;
+
+ static const intptr_t kAliasTag = 1;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap);
+};
+
+
class HeapSnapshotGenerator {
public:
+ class SnapshotFillerInterface {
+ public:
+ virtual ~SnapshotFillerInterface() { }
+ virtual HeapEntry* AddEntry(HeapObject* obj) = 0;
+ virtual void SetElementReference(HeapObject* parent_obj,
+ HeapEntry* parent_entry,
+ int index,
+ Object* child_obj,
+ HeapEntry* child_entry) = 0;
+ virtual void SetNamedReference(HeapGraphEdge::Type type,
+ HeapObject* parent_obj,
+ HeapEntry* parent_entry,
+ const char* reference_name,
+ Object* child_obj,
+ HeapEntry* child_entry) = 0;
+ virtual void SetRootReference(Object* child_obj,
+ HeapEntry* child_entry) = 0;
+
+ static HeapEntry *const kHeapEntryPlaceholder;
+ };
+
explicit HeapSnapshotGenerator(HeapSnapshot* snapshot);
void GenerateSnapshot();
private:
+ HeapEntry* GetEntry(Object* obj);
+ int GetGlobalSecurityToken();
+ int GetObjectSecurityToken(HeapObject* obj);
void ExtractReferences(HeapObject* obj);
void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
+ void SetClosureReference(HeapObject* parent_obj,
+ HeapEntry* parent,
+ String* reference_name,
+ Object* child);
+ void SetElementReference(HeapObject* parent_obj,
+ HeapEntry* parent,
+ int index,
+ Object* child);
+ void SetInternalReference(HeapObject* parent_obj,
+ HeapEntry* parent,
+ const char* reference_name,
+ Object* child);
+ void SetPropertyReference(HeapObject* parent_obj,
+ HeapEntry* parent,
+ String* reference_name,
+ Object* child);
+ void SetRootReference(Object* child);
HeapSnapshot* snapshot_;
+ HeapSnapshotsCollection* collection_;
+ // Mapping from HeapObject* pointers to HeapEntry* pointers.
+ HeapEntriesMap entries_;
+ SnapshotFillerInterface* filler_;
+
+ friend class IndexedReferencesExtractor;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
};
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 15a56528d..01c58def3 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -115,8 +115,8 @@ class MapTransitionDescriptor: public Descriptor {
// the same CONSTANT_FUNCTION field.
class ConstTransitionDescriptor: public Descriptor {
public:
- explicit ConstTransitionDescriptor(String* key)
- : Descriptor(key, Smi::FromInt(0), NONE, CONSTANT_TRANSITION) { }
+ explicit ConstTransitionDescriptor(String* key, Map* map)
+ : Descriptor(key, map, NONE, CONSTANT_TRANSITION) { }
};
@@ -260,7 +260,7 @@ class LookupResult BASE_EMBEDDED {
Map* GetTransitionMap() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(type() == MAP_TRANSITION);
+ ASSERT(type() == MAP_TRANSITION || type() == CONSTANT_TRANSITION);
return Map::cast(GetValue());
}
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index c7d3ff7f1..fc6ca762f 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -305,13 +305,14 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
}
Handle<Object> result;
uint32_t element_index = 0;
- if (key->ToArrayIndex(&element_index)) {
- // Array index (uint32).
- result = SetElement(boilerplate, element_index, value);
- } else if (key->IsSymbol()) {
- // The key is not an array index.
+ if (key->IsSymbol()) {
+    // If the key is a symbol, it is not an array element.
Handle<String> name(String::cast(*key));
+ ASSERT(!name->AsArrayIndex(&element_index));
result = SetProperty(boilerplate, name, value, NONE);
+ } else if (key->ToArrayIndex(&element_index)) {
+ // Array index (uint32).
+ result = SetElement(boilerplate, element_index, value);
} else {
// Non-uint32 number.
ASSERT(key->IsNumber());
@@ -1626,7 +1627,8 @@ static Object* Runtime_SetCode(Arguments args) {
}
// Set the code, scope info, formal parameter count,
// and the length of the target function.
- target->set_code(fun->code());
+ target->shared()->set_code(shared->code());
+ target->set_code(shared->code());
target->shared()->set_scope_info(shared->scope_info());
target->shared()->set_length(shared->length());
target->shared()->set_formal_parameter_count(
@@ -6869,7 +6871,7 @@ static Object* Runtime_LazyCompile(Arguments args) {
Handle<JSFunction> function = args.at<JSFunction>(0);
#ifdef DEBUG
- if (FLAG_trace_lazy) {
+ if (FLAG_trace_lazy && !function->shared()->is_compiled()) {
PrintF("[lazy: ");
function->shared()->name()->Print();
PrintF("]\n");
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index aca19457d..42968104b 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -175,7 +175,7 @@ function ADD(x) {
// Left operand (this) is already a string.
function STRING_ADD_LEFT(y) {
if (!IS_STRING(y)) {
- if (IS_STRING_WRAPPER(y)) {
+ if (IS_STRING_WRAPPER(y) && %_IsStringWrapperSafeForDefaultValueOf(y)) {
y = %_ValueOf(y);
} else {
y = IS_NUMBER(y)
@@ -191,7 +191,7 @@ function STRING_ADD_LEFT(y) {
function STRING_ADD_RIGHT(y) {
var x = this;
if (!IS_STRING(x)) {
- if (IS_STRING_WRAPPER(x)) {
+ if (IS_STRING_WRAPPER(x) && %_IsStringWrapperSafeForDefaultValueOf(x)) {
x = %_ValueOf(x);
} else {
x = IS_NUMBER(x)
@@ -387,11 +387,11 @@ function GET_KEYS() {
// Filter a given key against an object by checking if the object
// has a property with the given key; return the key as a string if
-// it has. Otherwise returns null. Used in for-in statements.
+// it has. Otherwise returns 0 (smi). Used in for-in statements.
function FILTER_KEY(key) {
var string = %ToString(key);
if (%HasProperty(this, string)) return string;
- return null;
+ return 0;
}
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index 3988b4a89..0057d18f1 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -680,14 +680,6 @@ void Deserializer::ReadObject(int space_number,
LOG(SnapshotPositionEvent(address, source_->position()));
}
ReadChunk(current, limit, space_number, address);
-
- if (space == Heap::map_space()) {
- ASSERT(size == Map::kSize);
- HeapObject* obj = HeapObject::FromAddress(address);
- Map* map = reinterpret_cast<Map*>(obj);
- map->set_scavenger(Heap::GetScavenger(map->instance_type(),
- map->instance_size()));
- }
}
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index 6a318f193..d1b668d13 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -248,7 +248,7 @@ class SerializerDeserializer: public ObjectVisitor {
}
static int partial_snapshot_cache_length_;
- static const int kPartialSnapshotCacheCapacity = 1300;
+ static const int kPartialSnapshotCacheCapacity = 1400;
static Object* partial_snapshot_cache_[];
};
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 6a0c93e8d..54d93845d 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -789,23 +789,6 @@ Object* StubCache::ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
#endif
-Object* StubCache::ComputeLazyCompile(int argc) {
- Code::Flags flags =
- Code::ComputeFlags(Code::STUB, NOT_IN_LOOP, UNINITIALIZED, NORMAL, argc);
- Object* probe = ProbeCache(flags);
- if (!probe->IsUndefined()) return probe;
- StubCompiler compiler;
- Object* result = FillCache(compiler.CompileLazyCompile(flags));
- if (result->IsCode()) {
- Code* code = Code::cast(result);
- USE(code);
- PROFILE(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
- code, code->arguments_count()));
- }
- return result;
-}
-
-
void StubCache::Clear() {
for (int i = 0; i < kPrimaryTableSize; i++) {
primary_[i].key = Heap::empty_string();
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 0be32f121..663201b9d 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -210,8 +210,6 @@ class StubCache : public AllStatic {
static Object* ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind);
#endif
- static Object* ComputeLazyCompile(int argc);
-
// Update cache for entry hash(name, map).
static Code* Set(String* name, Map* map, Code* code);
@@ -357,7 +355,6 @@ class StubCompiler BASE_EMBEDDED {
Object* CompileCallDebugBreak(Code::Flags flags);
Object* CompileCallDebugPrepareStepIn(Code::Flags flags);
#endif
- Object* CompileLazyCompile(Code::Flags flags);
// Static functions for generating parts of stubs.
static void GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
diff --git a/deps/v8/src/third_party/dtoa/dtoa.c b/deps/v8/src/third_party/dtoa/dtoa.c
index 178b3d12d..068ed949d 100644
--- a/deps/v8/src/third_party/dtoa/dtoa.c
+++ b/deps/v8/src/third_party/dtoa/dtoa.c
@@ -270,25 +270,14 @@ Exactly one of IEEE_8087, IEEE_MC68k, VAX, or IBM should be defined.
typedef union { double d; ULong L[2]; } U;
-#ifdef YES_ALIAS
-#define dval(x) x
#ifdef IEEE_8087
-#define word0(x) ((ULong *)&x)[1]
-#define word1(x) ((ULong *)&x)[0]
+#define word0(x) (x).L[1]
+#define word1(x) (x).L[0]
#else
-#define word0(x) ((ULong *)&x)[0]
-#define word1(x) ((ULong *)&x)[1]
-#endif
-#else
-#ifdef IEEE_8087
-#define word0(x) ((U*)&x)->L[1]
-#define word1(x) ((U*)&x)->L[0]
-#else
-#define word0(x) ((U*)&x)->L[0]
-#define word1(x) ((U*)&x)->L[1]
-#endif
-#define dval(x) ((U*)&x)->d
+#define word0(x) (x).L[0]
+#define word1(x) (x).L[1]
#endif
+#define dval(x) (x).d
/* The following definition of Storeinc is appropriate for MIPS processors.
* An alternative that might be better on some machines is
@@ -1108,13 +1097,15 @@ diff
static double
ulp
#ifdef KR_headers
- (x) double x;
+ (dx) double dx;
#else
- (double x)
+ (double dx)
#endif
{
register Long L;
- double a;
+ U x, a;
+
+ dval(x) = dx;
L = (word0(x) & Exp_mask) - (P-1)*Exp_msk1;
#ifndef Avoid_Underflow
@@ -1157,7 +1148,7 @@ b2d
{
ULong *xa, *xa0, w, y, z;
int k;
- double d;
+ U d;
#ifdef VAX
ULong d0, d1;
#else
@@ -1220,9 +1211,9 @@ b2d
static Bigint *
d2b
#ifdef KR_headers
- (d, e, bits) double d; int *e, *bits;
+ (dd, e, bits) double dd; int *e, *bits;
#else
- (double d, int *e, int *bits)
+ (double dd, int *e, int *bits)
#endif
{
Bigint *b;
@@ -1236,6 +1227,8 @@ d2b
d0 = word0(d) >> 16 | word0(d) << 16;
d1 = word1(d) >> 16 | word1(d) << 16;
#else
+ U d;
+ dval(d) = dd;
#define d0 word0(d)
#define d1 word1(d)
#endif
@@ -1368,7 +1361,7 @@ ratio
(Bigint *a, Bigint *b)
#endif
{
- double da, db;
+ U da, db;
int k, ka, kb;
dval(da) = b2d(a, &ka);
@@ -1542,7 +1535,8 @@ strtod
int bb2, bb5, bbe, bd2, bd5, bbbits, bs2, c, dsign,
e, e1, esign, i, j, k, nd, nd0, nf, nz, nz0, sign;
CONST char *s, *s0, *s1;
- double aadj, aadj1, adj, rv, rv0;
+ double aadj;
+ U aadj1, adj, rv, rv0;
Long L;
ULong y, z;
Bigint *bb = NULL, *bb1, *bd = NULL, *bd0, *bs = NULL, *delta = NULL;
@@ -2042,12 +2036,12 @@ strtod
}
if (rounding) {
if (dsign) {
- adj = 1.;
+ dval(adj) = 1.;
goto apply_adj;
}
}
else if (!dsign) {
- adj = -1.;
+ dval(adj) = -1.;
if (!word1(rv)
&& !(word0(rv) & Frac_mask)) {
y = word0(rv) & Exp_mask;
@@ -2059,7 +2053,7 @@ strtod
{
delta = lshift(delta,Log2P);
if (cmp(delta, bs) <= 0)
- adj = -0.5;
+ dval(adj) = -0.5;
}
}
apply_adj:
@@ -2072,26 +2066,26 @@ strtod
if ((word0(rv) & Exp_mask) <=
P*Exp_msk1) {
word0(rv) += P*Exp_msk1;
- dval(rv) += adj*ulp(dval(rv));
+ dval(rv) += dval(adj)*ulp(dval(rv));
word0(rv) -= P*Exp_msk1;
}
else
#endif /*Sudden_Underflow*/
#endif /*Avoid_Underflow*/
- dval(rv) += adj*ulp(dval(rv));
+ dval(rv) += dval(adj)*ulp(dval(rv));
}
break;
}
- adj = ratio(delta, bs);
- if (adj < 1.)
- adj = 1.;
- if (adj <= 0x7ffffffe) {
+ dval(adj) = ratio(delta, bs);
+ if (dval(adj) < 1.)
+ dval(adj) = 1.;
+ if (dval(adj) <= 0x7ffffffe) {
/* adj = rounding ? ceil(adj) : floor(adj); */
- y = adj;
- if (y != adj) {
+ y = dval(adj);
+ if (y != dval(adj)) {
if (!((rounding>>1) ^ dsign))
y++;
- adj = y;
+ dval(adj) = y;
}
}
#ifdef Avoid_Underflow
@@ -2101,21 +2095,21 @@ strtod
#ifdef Sudden_Underflow
if ((word0(rv) & Exp_mask) <= P*Exp_msk1) {
word0(rv) += P*Exp_msk1;
- adj *= ulp(dval(rv));
+ dval(adj) *= ulp(dval(rv));
if (dsign)
- dval(rv) += adj;
+ dval(rv) += dval(adj);
else
- dval(rv) -= adj;
+ dval(rv) -= dval(adj);
word0(rv) -= P*Exp_msk1;
goto cont;
}
#endif /*Sudden_Underflow*/
#endif /*Avoid_Underflow*/
- adj *= ulp(dval(rv));
+ dval(adj) *= ulp(dval(rv));
if (dsign)
- dval(rv) += adj;
+ dval(rv) += dval(adj);
else
- dval(rv) -= adj;
+ dval(rv) -= dval(adj);
goto cont;
}
#endif /*Honor_FLT_ROUNDS*/
@@ -2237,14 +2231,14 @@ strtod
}
if ((aadj = ratio(delta, bs)) <= 2.) {
if (dsign)
- aadj = aadj1 = 1.;
+ aadj = dval(aadj1) = 1.;
else if (word1(rv) || word0(rv) & Bndry_mask) {
#ifndef Sudden_Underflow
if (word1(rv) == Tiny1 && !word0(rv))
goto undfl;
#endif
aadj = 1.;
- aadj1 = -1.;
+ dval(aadj1) = -1.;
}
else {
/* special case -- power of FLT_RADIX to be */
@@ -2254,24 +2248,24 @@ strtod
aadj = 1./FLT_RADIX;
else
aadj *= 0.5;
- aadj1 = -aadj;
+ dval(aadj1) = -aadj;
}
}
else {
aadj *= 0.5;
- aadj1 = dsign ? aadj : -aadj;
+ dval(aadj1) = dsign ? aadj : -aadj;
#ifdef Check_FLT_ROUNDS
switch(Rounding) {
case 2: /* towards +infinity */
- aadj1 -= 0.5;
+ dval(aadj1) -= 0.5;
break;
case 0: /* towards 0 */
case 3: /* towards -infinity */
- aadj1 += 0.5;
+ dval(aadj1) += 0.5;
}
#else
if (Flt_Rounds == 0)
- aadj1 += 0.5;
+ dval(aadj1) += 0.5;
#endif /*Check_FLT_ROUNDS*/
}
y = word0(rv) & Exp_mask;
@@ -2281,8 +2275,8 @@ strtod
if (y == Exp_msk1*(DBL_MAX_EXP+Bias-1)) {
dval(rv0) = dval(rv);
word0(rv) -= P*Exp_msk1;
- adj = aadj1 * ulp(dval(rv));
- dval(rv) += adj;
+ dval(adj) = dval(aadj1) * ulp(dval(rv));
+ dval(rv) += dval(adj);
if ((word0(rv) & Exp_mask) >=
Exp_msk1*(DBL_MAX_EXP+Bias-P)) {
if (word0(rv0) == Big0 && word1(rv0) == Big1)
@@ -2301,19 +2295,19 @@ strtod
if ((z = aadj) <= 0)
z = 1;
aadj = z;
- aadj1 = dsign ? aadj : -aadj;
+ dval(aadj1) = dsign ? aadj : -aadj;
}
word0(aadj1) += (2*P+1)*Exp_msk1 - y;
}
- adj = aadj1 * ulp(dval(rv));
- dval(rv) += adj;
+ dval(adj) = dval(aadj1) * ulp(dval(rv));
+ dval(rv) += dval(adj);
#else
#ifdef Sudden_Underflow
if ((word0(rv) & Exp_mask) <= P*Exp_msk1) {
dval(rv0) = dval(rv);
word0(rv) += P*Exp_msk1;
- adj = aadj1 * ulp(dval(rv));
- dval(rv) += adj;
+ dval(adj) = dval(aadj1) * ulp(dval(rv));
+ dval(rv) += dval(adj);
#ifdef IBM
if ((word0(rv) & Exp_mask) < P*Exp_msk1)
#else
@@ -2331,8 +2325,8 @@ strtod
word0(rv) -= P*Exp_msk1;
}
else {
- adj = aadj1 * ulp(dval(rv));
- dval(rv) += adj;
+ dval(adj) = dval(aadj1) * ulp(dval(rv));
+ dval(rv) += dval(adj);
}
#else /*Sudden_Underflow*/
/* Compute adj so that the IEEE rounding rules will
@@ -2343,12 +2337,12 @@ strtod
* example: 1.2e-307 .
*/
if (y <= (P-1)*Exp_msk1 && aadj > 1.) {
- aadj1 = (double)(int)(aadj + 0.5);
+ dval(aadj1) = (double)(int)(aadj + 0.5);
if (!dsign)
- aadj1 = -aadj1;
+ dval(aadj1) = -dval(aadj1);
}
- adj = aadj1 * ulp(dval(rv));
- dval(rv) += adj;
+ dval(adj) = dval(aadj1) * ulp(dval(rv));
+ dval(rv) += dval(adj);
#endif /*Sudden_Underflow*/
#endif /*Avoid_Underflow*/
}
@@ -2638,10 +2632,10 @@ freedtoa(char *s)
char *
dtoa
#ifdef KR_headers
- (d, mode, ndigits, decpt, sign, rve)
- double d; int mode, ndigits, *decpt, *sign; char **rve;
+ (dd, mode, ndigits, decpt, sign, rve)
+ double dd; int mode, ndigits, *decpt, *sign; char **rve;
#else
- (double d, int mode, int ndigits, int *decpt, int *sign, char **rve)
+ (double dd, int mode, int ndigits, int *decpt, int *sign, char **rve)
#endif
{
/* Arguments ndigits, decpt, sign are similar to those
@@ -2687,7 +2681,8 @@ dtoa
ULong x;
#endif
Bigint *b, *b1, *delta, *mlo, *mhi, *S;
- double d2, ds, eps;
+ double ds;
+ U d2, eps;
char *s, *s0;
#ifdef Honor_FLT_ROUNDS
int rounding;
@@ -2695,6 +2690,8 @@ dtoa
#ifdef SET_INEXACT
int inexact, oldinexact;
#endif
+ U d;
+ dval(d) = dd;
/* In mode 2 and 3 we bias rounding up when there are ties. */
bias_round_up = mode == 2 || mode == 3;
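
These dtoa.c changes replace casts like ((ULong*)&x)[0] and
((U*)&x)->L[0] with a real union object, so the double and its 32-bit
halves are accessed through the same object; the old pointer-cast forms
violate strict-aliasing rules that newer compilers exploit. A minimal
sketch of the patched access pattern, assuming a 32-bit unsigned int and a
little-endian IEEE double (the IEEE_8087 branch):

    #include <cstdio>

    typedef unsigned int ULong;  // assumed 32 bits, as dtoa.c requires

    // Same shape as dtoa.c's U. The union guarantees d and L occupy the
    // same storage, so the compiler cannot reorder accesses the way it
    // may with casts between unrelated pointer types.
    typedef union { double d; ULong L[2]; } U;

    #define dval(x)  (x).d
    #define word0(x) (x).L[1]  // high word in the little-endian case
    #define word1(x) (x).L[0]

    int main() {
      U rv;
      dval(rv) = 1.5;
      // Prints 3ff80000 00000000: sign, exponent, and top mantissa bits
      // land in word0.
      std::printf("%08x %08x\n", word0(rv), word1(rv));
      return 0;
    }
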
diff --git a/deps/v8/src/top.cc b/deps/v8/src/top.cc
index 2887b7664..82960270b 100644
--- a/deps/v8/src/top.cc
+++ b/deps/v8/src/top.cc
@@ -107,16 +107,15 @@ void Top::IterateThread(ThreadVisitor* v, char* t) {
void Top::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
v->VisitPointer(&(thread->pending_exception_));
v->VisitPointer(&(thread->pending_message_obj_));
- v->VisitPointer(
- BitCast<Object**, Script**>(&(thread->pending_message_script_)));
- v->VisitPointer(BitCast<Object**, Context**>(&(thread->context_)));
+ v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
+ v->VisitPointer(BitCast<Object**>(&(thread->context_)));
v->VisitPointer(&(thread->scheduled_exception_));
for (v8::TryCatch* block = thread->TryCatchHandler();
block != NULL;
block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
- v->VisitPointer(BitCast<Object**, void**>(&(block->exception_)));
- v->VisitPointer(BitCast<Object**, void**>(&(block->message_)));
+ v->VisitPointer(BitCast<Object**>(&(block->exception_)));
+ v->VisitPointer(BitCast<Object**>(&(block->message_)));
}
// Iterate over pointers on native execution stack.
@@ -521,7 +520,6 @@ void Top::PrintStack(StringStream* accumulator) {
void Top::SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback) {
- ASSERT(thread_local_.failed_access_check_callback_ == NULL);
thread_local_.failed_access_check_callback_ = callback;
}
@@ -531,8 +529,6 @@ void Top::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
ASSERT(receiver->IsAccessCheckNeeded());
ASSERT(Top::context());
- // The callers of this method are not expecting a GC.
- AssertNoAllocation no_gc;
// Get the data object from access check info.
JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 91ecab8f5..f588e5611 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -54,7 +54,7 @@ class TypeInfo {
static inline TypeInfo Primitive();
// We know it's a number of some sort.
static inline TypeInfo Number();
- // We know it's signed or unsigned 32 bit integer.
+ // We know it's signed 32 bit integer.
static inline TypeInfo Integer32();
// We know it's a Smi.
static inline TypeInfo Smi();
@@ -113,19 +113,15 @@ class TypeInfo {
}
- // Integer32 is an integer that can be represented as either a signed
- // 32-bit integer or as an unsigned 32-bit integer. It has to be
- // in the range [-2^31, 2^32 - 1]. We also have to check for negative 0
- // as it is not an Integer32.
+ // Integer32 is an integer that can be represented as a signed
+ // 32-bit integer. It has to be in the range [-2^31, 2^31 - 1].
+ // We also have to check for negative 0 as it is not an Integer32.
static inline bool IsInt32Double(double value) {
const DoubleRepresentation minus_zero(-0.0);
DoubleRepresentation rep(value);
if (rep.bits == minus_zero.bits) return false;
- if (value >= kMinInt && value <= kMaxUInt32) {
- if (value <= kMaxInt && value == static_cast<int32_t>(value)) {
- return true;
- }
- if (value == static_cast<uint32_t>(value)) return true;
+ if (value >= kMinInt && value <= kMaxInt) {
+ if (value == static_cast<int32_t>(value)) return true;
}
return false;
}
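
The boundary behaviour of the narrowed predicate, as a self-contained
check. This re-implements the patched logic with standard types, so
kMinInt/kMaxInt are spelled INT32_MIN/INT32_MAX and the minus-zero bit
test uses memcpy rather than V8's DoubleRepresentation:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static bool IsInt32Double(double value) {
      double minus_zero = -0.0;
      uint64_t bits, minus_zero_bits;
      std::memcpy(&bits, &value, sizeof(bits));
      std::memcpy(&minus_zero_bits, &minus_zero, sizeof(minus_zero_bits));
      if (bits == minus_zero_bits) return false;  // -0.0 is excluded
      if (value >= INT32_MIN && value <= INT32_MAX) {
        if (value == static_cast<int32_t>(value)) return true;
      }
      return false;
    }

    int main() {
      assert(IsInt32Double(-2147483648.0));  // -2^31, lower bound
      assert(IsInt32Double(2147483647.0));   // 2^31 - 1, upper bound
      assert(!IsInt32Double(2147483648.0));  // 2^31: accepted before,
                                             // rejected after the patch
      assert(!IsInt32Double(4294967295.0));  // 2^32 - 1: likewise rejected
      assert(!IsInt32Double(-0.0));          // minus zero is not Integer32
      assert(!IsInt32Double(1.5));           // not integral
      return 0;
    }
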
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 236b85e64..d15319c7a 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -739,7 +739,11 @@ inline Dest BitCast(const Source& source) {
return dest;
}
-} } // namespace v8::internal
+template <class Dest, class Source>
+inline Dest BitCast(Source* const & source) {
+ return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
+}
+} } // namespace v8::internal
#endif // V8_UTILS_H_
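
The added overload lets BitCast accept a pointer by converting it to
uintptr_t first, so call sites such as BitCast<Object**>(&field) in top.cc
no longer spell out the source type. The hunk shows only the tail of the
primary template; a sketch of the full pair, assuming the primary template
is the usual memcpy-based implementation:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Primary template (assumed shape): copy the bits, never alias
    // through an unrelated pointer type.
    template <class Dest, class Source>
    inline Dest BitCast(const Source& source) {
      // Compile-time size check, standing in for V8's COMPILE_ASSERT.
      typedef char SizesMatch[sizeof(Dest) == sizeof(Source) ? 1 : -1];
      (void)sizeof(SizesMatch);
      Dest dest;
      std::memcpy(&dest, &source, sizeof(dest));
      return dest;
    }

    // The new overload: route pointer sources through uintptr_t so only
    // the destination type has to be named at the call site.
    template <class Dest, class Source>
    inline Dest BitCast(Source* const& source) {
      return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
    }

    int main() {
      double d = 1.0;
      unsigned long long bits = BitCast<unsigned long long>(d);
      std::printf("%016llx\n", bits);  // 3ff0000000000000
      return 0;
    }
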
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index c542aef87..61c0a0e65 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 3
-#define BUILD_NUMBER 6
-#define PATCH_LEVEL 1
+#define BUILD_NUMBER 8
+#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build put a specific SONAME into the
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index c8abd22a6..44159e062 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -350,6 +350,29 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
}
+template<typename StaticVisitor>
+void RelocInfo::Visit() {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitPointer(target_object_address());
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (Debug::has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(this);
+#endif
+ } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Implementation of Operand
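
Unlike the virtual-dispatch Visit(ObjectVisitor*) named in the hunk
header, this Visit is parameterized by a StaticVisitor type and calls
static member functions, so dispatch is resolved at compile time and can
be inlined on GC-hot paths. A toy illustration of the static-visitor
pattern:

    #include <cstdio>

    struct Item { int kind; void* payload; };

    // Static-dispatch visitor: every call resolves at compile time, so
    // the compiler may inline the visitor bodies into the traversal.
    template <typename StaticVisitor>
    void Visit(Item* item) {
      if (item->kind == 0) {
        StaticVisitor::VisitPointer(&item->payload);
      } else {
        StaticVisitor::VisitOther(item);
      }
    }

    struct PrintVisitor {
      static void VisitPointer(void** p) {
        std::printf("pointer %p\n", *p);
      }
      static void VisitOther(Item* i) {
        std::printf("other kind=%d\n", i->kind);
      }
    };

    int main() {
      Item a = {0, &a};
      Item b = {3, 0};
      Visit<PrintVisitor>(&a);
      Visit<PrintVisitor>(&b);
      return 0;
    }
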
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index d90655b09..9ad94ce0f 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -253,7 +253,7 @@ Operand::Operand(const Operand& operand, int32_t offset) {
int32_t disp_value = 0;
if (mode == 0x80 || is_baseless) {
// Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
- disp_value = *reinterpret_cast<const int32_t*>(&operand.buf_[disp_offset]);
+ disp_value = *BitCast<const int32_t*>(&operand.buf_[disp_offset]);
} else if (mode == 0x40) {
// Mode 1: Byte displacement.
disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 959b4b034..4f2d2b961 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -310,7 +310,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ movsxlq(rbx,
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
__ cmpq(rax, rbx);
__ j(not_equal,
@@ -1291,6 +1291,26 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+
+void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+
+ __ push(rdi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ __ pop(rdi);
+
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+
+ // Do a tail-call of the compiled function.
+ __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
+ __ jmp(rcx);
+}
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
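
Generate_LazyCompile emits the stub installed as the initial code of
not-yet-compiled functions: it saves the function, calls
Runtime::kLazyCompile, and tail-jumps into the code object the runtime
returns, so later calls bypass the stub entirely. A loose C++ analogue of
the self-replacing trampoline (illustrative only; the real stub operates
on V8's calling convention):

    #include <cstdio>

    struct Function;
    typedef int (*Code)(Function* self, int arg);

    struct Function {
      Code code;  // entry point; starts as the lazy-compile trampoline
    };

    static int CompiledBody(Function*, int arg) { return arg * 2; }

    // Trampoline: "compile" (here: just install the real body), then
    // tail-call the result, like the final jmp rcx in the stub above.
    static int LazyCompileTrampoline(Function* self, int arg) {
      std::puts("[lazy: compiling]");
      self->code = &CompiledBody;    // patch the entry point
      return self->code(self, arg);  // jump into the compiled code
    }

    int main() {
      Function f = { &LazyCompileTrampoline };
      std::printf("%d\n", f.code(&f, 21));  // compiles, then runs
      std::printf("%d\n", f.code(&f, 21));  // runs directly
      return 0;
    }
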
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index b6256faf8..e545ffa3d 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -201,103 +201,89 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// rsi: callee's context
allocator_->Initialize();
- if (info->mode() == CompilationInfo::PRIMARY) {
- frame_->Enter();
-
- // Allocate space for locals and initialize them.
- frame_->AllocateStackSlots();
-
- // Allocate the local context if needed.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ allocate local context");
- // Allocate local context.
- // Get outer context and create a new context based on it.
- frame_->PushFunction();
- Result context;
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- context = frame_->CallStub(&stub, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kNewContext, 1);
- }
+ frame_->Enter();
+
+ // Allocate space for locals and initialize them.
+ frame_->AllocateStackSlots();
+
+ // Allocate the local context if needed.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment cmnt(masm_, "[ allocate local context");
+ // Allocate local context.
+ // Get outer context and create a new context based on it.
+ frame_->PushFunction();
+ Result context;
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ context = frame_->CallStub(&stub, 1);
+ } else {
+ context = frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
- // Update context local.
- frame_->SaveContextRegister();
+ // Update context local.
+ frame_->SaveContextRegister();
- // Verify that the runtime call result and rsi agree.
- if (FLAG_debug_code) {
- __ cmpq(context.reg(), rsi);
- __ Assert(equal, "Runtime::NewContext should end up in rsi");
- }
+ // Verify that the runtime call result and rsi agree.
+ if (FLAG_debug_code) {
+ __ cmpq(context.reg(), rsi);
+ __ Assert(equal, "Runtime::NewContext should end up in rsi");
}
+ }
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
-  // case, so we don't check for it; instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope()->num_parameters(); i++) {
- Variable* par = scope()->parameter(i);
- Slot* slot = par->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- // The use of SlotOperand below is safe in unspilled code
- // because the slot is guaranteed to be a context slot.
- //
- // There are no parameters in the global scope.
- ASSERT(!scope()->is_global_scope());
- frame_->PushParameterAt(i);
- Result value = frame_->Pop();
- value.ToRegister();
-
- // SlotOperand loads context.reg() with the context object
- // stored to, used below in RecordWrite.
- Result context = allocator_->Allocate();
- ASSERT(context.is_valid());
- __ movq(SlotOperand(slot, context.reg()), value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- frame_->Spill(context.reg());
- frame_->Spill(value.reg());
- __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
- }
+ // TODO(1241774): Improve this code:
+ // 1) only needed if we have a context
+ // 2) no need to recompute context ptr every single time
+ // 3) don't copy parameter operand code from SlotOperand!
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, the value that ends up in the
+ // context must be the last argument passed for that parameter. This
+ // is a rare case, so we don't check for it; instead we rely on the
+ // copying order: such a parameter is copied repeatedly into the same
+ // context location, and thus the last value is what is seen inside
+ // the function.
+ for (int i = 0; i < scope()->num_parameters(); i++) {
+ Variable* par = scope()->parameter(i);
+ Slot* slot = par->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ // The use of SlotOperand below is safe in unspilled code
+ // because the slot is guaranteed to be a context slot.
+ //
+ // There are no parameters in the global scope.
+ ASSERT(!scope()->is_global_scope());
+ frame_->PushParameterAt(i);
+ Result value = frame_->Pop();
+ value.ToRegister();
+
+ // SlotOperand loads context.reg() with the context object being
+ // stored into; that register is used below in RecordWrite.
+ Result context = allocator_->Allocate();
+ ASSERT(context.is_valid());
+ __ movq(SlotOperand(slot, context.reg()), value.reg());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ frame_->Spill(context.reg());
+ frame_->Spill(value.reg());
+ __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
}
}
+ }
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
+ // Store the arguments object. This must happen after context
+ // initialization because the arguments object may be stored in
+ // the context.
+ if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
+ StoreArgumentsObject(true);
+ }
- // Initialize ThisFunction reference if present.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- frame_->Push(Factory::the_hole_value());
- StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
- }
- } else {
- // When used as the secondary compiler for splitting, rbp, rsi,
- // and rdi have been pushed on the stack. Adjust the virtual
- // frame to match this state.
- frame_->Adjust(3);
- allocator_->Unuse(rdi);
-
- // Bind all the bailout labels to the beginning of the function.
- List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
- for (int i = 0; i < bailouts->length(); i++) {
- __ bind(bailouts->at(i)->label());
- }
+ // Initialize ThisFunction reference if present.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ frame_->Push(Factory::the_hole_value());
+ StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
}
// Initialize the function return target after the locals are set
@@ -2630,9 +2616,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
__ j(is_smi, &build_args);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &build_args);
- __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ Cmp(FieldOperand(rax, SharedFunctionInfo::kCodeOffset), apply_code);
+ __ Cmp(FieldOperand(rax, JSFunction::kCodeOffset), apply_code);
__ j(not_equal, &build_args);
// Check that applicand is a function.
@@ -3926,7 +3911,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
__ movq(rbx, rax);
// If the property has been removed while iterating, we just skip it.
- __ CompareRoot(rbx, Heap::kNullValueRootIndex);
+ __ SmiCompare(rbx, Smi::FromInt(0));
node->continue_target()->Branch(equal);
end_del_check.Bind();
@@ -6041,6 +6026,143 @@ void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
}
+// Deferred code to check whether the String JavaScript object is safe to use
+// with the default valueOf behavior. This code is called after the bit that
+// caches this information in the map has been checked; the object's map is
+// expected in the map_result_ register. On return the register map_result_
+// contains 1 for true and 0 for false.
+class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
+ public:
+ DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
+ Register map_result,
+ Register scratch1,
+ Register scratch2)
+ : object_(object),
+ map_result_(map_result),
+ scratch1_(scratch1),
+ scratch2_(scratch2) { }
+
+ virtual void Generate() {
+ Label false_result;
+
+ // Check that map is loaded as expected.
+ if (FLAG_debug_code) {
+ __ cmpq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ Assert(equal, "Map not in expected register");
+ }
+
+ // Check for fast case object. Generate false result for slow case object.
+ __ movq(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
+ __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1_, Heap::kHashTableMapRootIndex);
+ __ j(equal, &false_result);
+
+ // Look for valueOf symbol in the descriptor array, and indicate false if
+ // found. The type is not checked, so if it is a transition it is a false
+ // negative.
+ __ movq(map_result_,
+ FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
+ __ movq(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
+ // map_result_: descriptor array
+ // scratch1_: length of descriptor array
+ // Calculate the end of the descriptor array.
+ SmiIndex index = masm_->SmiToIndex(scratch2_, scratch1_, kPointerSizeLog2);
+ __ lea(scratch1_,
+ Operand(
+ map_result_, index.reg, index.scale, FixedArray::kHeaderSize));
+ // Calculate location of the first key name.
+ __ addq(map_result_,
+ Immediate(FixedArray::kHeaderSize +
+ DescriptorArray::kFirstIndex * kPointerSize));
+ // Loop through all the keys in the descriptor array. If one of them is the
+ // valueOf symbol, the result is false.
+ Label entry, loop;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(scratch2_, FieldOperand(map_result_, 0));
+ __ Cmp(scratch2_, Factory::value_of_symbol());
+ __ j(equal, &false_result);
+ __ addq(map_result_, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(map_result_, scratch1_);
+ __ j(not_equal, &loop);
+
+ // Reload the map, as register map_result_ was used as a temporary above.
+ __ movq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
+
+ // If a valueOf property is not found on the object, check that its
+ // prototype is the unmodified String prototype. If not, the result is false.
+ __ movq(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset));
+ __ testq(scratch1_, Immediate(kSmiTagMask));
+ __ j(zero, &false_result);
+ __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
+ __ movq(scratch2_,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(scratch2_,
+ FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset));
+ __ cmpq(scratch1_,
+ CodeGenerator::ContextOperand(
+ scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ j(not_equal, &false_result);
+ // Set the bit in the map to indicate that it has been checked as safe for
+ // the default valueOf, and set a true result.
+ __ or_(FieldOperand(map_result_, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ Set(map_result_, 1);
+ __ jmp(exit_label());
+ __ bind(&false_result);
+ // Set false result.
+ __ Set(map_result_, 0);
+ }
+
+ private:
+ Register object_;
+ Register map_result_;
+ Register scratch1_;
+ Register scratch2_;
+};
+
+
+void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop(); // Pop the string wrapper.
+ obj.ToRegister();
+ ASSERT(obj.is_valid());
+ if (FLAG_debug_code) {
+ __ AbortIfSmi(obj.reg());
+ }
+
+ // Check whether this map has already been checked to be safe for default
+ // valueOf.
+ Result map_result = allocator()->Allocate();
+ ASSERT(map_result.is_valid());
+ __ movq(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(map_result.reg(), Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ destination()->true_target()->Branch(not_zero);
+
+ // We need two additional scratch registers for the deferred code.
+ Result temp1 = allocator()->Allocate();
+ ASSERT(temp1.is_valid());
+ Result temp2 = allocator()->Allocate();
+ ASSERT(temp2.is_valid());
+
+ DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
+ new DeferredIsStringWrapperSafeForDefaultValueOf(
+ obj.reg(), map_result.reg(), temp1.reg(), temp2.reg());
+ deferred->Branch(zero);
+ deferred->BindExit();
+ __ testq(map_result.reg(), map_result.reg());
+ obj.Unuse();
+ map_result.Unuse();
+ temp1.Unuse();
+ temp2.Unuse();
+ destination()->Split(not_equal);
+}
+
+
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (%_ClassOf(arg) === 'Function')
@@ -8635,6 +8757,12 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
__ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ movq(FieldOperand(rax, JSFunction::kCodeOffset), rdx);
+
// Return and remove the on-stack parameter.
__ ret(1 * kPointerSize);
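
The FastNewClosureStub hunk above shows the theme that runs through this
commit: a JSFunction now carries its own code pointer (JSFunction::kCodeOffset),
seeded from the SharedFunctionInfo, so call sites load the entry with a single
movq instead of two dependent loads through SharedFunctionInfo::kCodeOffset.
A minimal sketch of the indirection change, with illustrative struct names:

    struct CodeObj {};
    struct SharedFunctionInfo { CodeObj* code; };
    struct JSFunctionOld { SharedFunctionInfo* shared; };                // before
    struct JSFunctionNew { SharedFunctionInfo* shared; CodeObj* code; }; // after

    // Before: two dependent loads per call.
    CodeObj* EntryBefore(JSFunctionOld* f) { return f->shared->code; }
    // After: one load. The copy must be kept in sync, which is presumably why
    // the stub above initializes it and the lazy-compile path replaces it.
    CodeObj* EntryAfter(JSFunctionNew* f) { return f->code; }

    int main() {
      SharedFunctionInfo sfi = { nullptr };
      JSFunctionNew f = { &sfi, nullptr };
      return EntryAfter(&f) == nullptr ? 0 : 1;
    }
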
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 2806f5675..14f690eb8 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -347,6 +347,10 @@ class CodeGenerator: public AstVisitor {
// expected arguments. Otherwise return -1.
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
+ static Operand ContextOperand(Register context, int index) {
+ return Operand(context, Context::SlotOffset(index));
+ }
+
private:
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
@@ -406,10 +410,6 @@ class CodeGenerator: public AstVisitor {
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
- static Operand ContextOperand(Register context, int index) {
- return Operand(context, Context::SlotOffset(index));
- }
-
Operand SlotOperand(Slot* slot, Register tmp);
Operand ContextSlotOperandCheckExtensions(Slot* slot,
@@ -611,6 +611,8 @@ class CodeGenerator: public AstVisitor {
void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
+ void GenerateIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -764,6 +766,18 @@ class TranscendentalCacheStub: public CodeStub {
};
+class ToBooleanStub: public CodeStub {
+ public:
+ ToBooleanStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return 0; }
+};
+
+
// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags {
NO_GENERIC_BINARY_FLAGS = 0,
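
ToBooleanStub follows the usual CodeStub pattern: MajorKey and MinorKey
together identify a compiled stub so one Code object can be cached and shared,
and a MinorKey of 0 means the stub has a single variant. A loose model of that
keying, not V8's actual stub cache:

    #include <cstdint>
    #include <map>

    struct Code {};
    static std::map<uint32_t, Code*> stub_cache;
    static int compiles = 0;

    Code* CompileStub() { ++compiles; static Code code; return &code; }

    Code* GetOrCompile(uint32_t major, uint32_t minor, Code* (*compile)()) {
      uint32_t key = (major << 16) | minor;   // illustrative key packing
      auto it = stub_cache.find(key);
      if (it != stub_cache.end()) return it->second;
      return stub_cache[key] = compile();
    }

    int main() {
      GetOrCompile(7, 0, CompileStub);
      GetOrCompile(7, 0, CompileStub);        // cache hit: no recompilation
      return compiles == 1 ? 0 : 1;
    }
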
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 2aa77e775..d5b7e7768 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -202,23 +202,39 @@ void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on x64");
+ masm->ret(0);
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on x64");
+ ExternalReference restarter_frame_function_slot =
+ ExternalReference(Debug_Address::RestarterFrameFunctionPointer());
+ __ movq(rax, restarter_frame_function_slot);
+ __ movq(Operand(rax, 0), Immediate(0));
+
+ // We do not know our frame height, but set rsp based on rbp.
+ __ lea(rsp, Operand(rbp, -1 * kPointerSize));
+
+ __ pop(rdi); // Function.
+ __ pop(rbp);
+
+ // Load context from the function.
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ // Get function code.
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+
+ // Re-run JSFunction, rdi is function, rsi is context.
+ __ jmp(rdx);
}
+const bool Debug::kFrameDropperSupported = true;
+
#undef __
-Object** Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
- Handle<Code> code) {
- UNREACHABLE();
- return NULL;
-}
-const int Debug::kFrameDropperFrameSize = -1;
void BreakLocationIterator::ClearDebugBreakAtReturn() {
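
The new frame dropper is what makes LiveEdit function restarting work on x64:
it zeroes the restarter-frame function slot, rewinds rsp from rbp (the frame
height is unknown), pops the function and the saved frame pointer, reloads the
context and code from the function, and jumps back into it, reusing the
original return address. Assuming the standard JS frame layout from
frames-x64.h, the stack just before the drop looks like this:

    // Higher addresses up; assumed layout matching the pops above.
    //   rbp + 8 : return address      (left in place; reused on re-entry)
    //   rbp + 0 : caller's saved rbp  -> pop rbp
    //   rbp - 8 : function            -> pop rdi, after rsp = rbp - kPointerSize
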
diff --git a/deps/v8/src/x64/fast-codegen-x64.cc b/deps/v8/src/x64/fast-codegen-x64.cc
deleted file mode 100644
index 13eef0309..000000000
--- a/deps/v8/src/x64/fast-codegen-x64.cc
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "fast-codegen.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-Register FastCodeGenerator::accumulator0() { return rax; }
-Register FastCodeGenerator::accumulator1() { return rdx; }
-Register FastCodeGenerator::scratch0() { return rcx; }
-Register FastCodeGenerator::scratch1() { return rdi; }
-Register FastCodeGenerator::receiver_reg() { return rbx; }
-Register FastCodeGenerator::context_reg() { return rsi; }
-
-
-void FastCodeGenerator::EmitLoadReceiver() {
- // Offset 2 is due to return address and saved frame pointer.
- int index = 2 + scope()->num_parameters();
- __ movq(receiver_reg(), Operand(rbp, index * kPointerSize));
-}
-
-
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
- ASSERT(!destination().is(no_reg));
- ASSERT(cell->IsJSGlobalPropertyCell());
-
- __ Move(destination(), cell);
- __ movq(destination(),
- FieldOperand(destination(), JSGlobalPropertyCell::kValueOffset));
- if (FLAG_debug_code) {
- __ Cmp(destination(), Factory::the_hole_value());
- __ Check(not_equal, "DontDelete cells can't contain the hole");
- }
-
- // The loaded value is not known to be a smi.
- clear_as_smi(destination());
-}
-
-
-void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
- LookupResult lookup;
- info()->receiver()->Lookup(*name, &lookup);
-
- ASSERT(lookup.holder() == *info()->receiver());
- ASSERT(lookup.type() == FIELD);
- Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
- int index = lookup.GetFieldIndex() - map->inobject_properties();
- int offset = index * kPointerSize;
-
- // We will emit the write barrier unless the stored value is statically
- // known to be a smi.
- bool needs_write_barrier = !is_smi(accumulator0());
-
- // Perform the store. Negative offsets are inobject properties.
- if (offset < 0) {
- offset += map->instance_size();
- __ movq(FieldOperand(receiver_reg(), offset), accumulator0());
- if (needs_write_barrier) {
- // Preserve receiver from write barrier.
- __ movq(scratch0(), receiver_reg());
- }
- } else {
- offset += FixedArray::kHeaderSize;
- __ movq(scratch0(),
- FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
- __ movq(FieldOperand(scratch0(), offset), accumulator0());
- }
-
- if (needs_write_barrier) {
- if (destination().is(no_reg)) {
- // After RecordWrite accumulator0 is only accidently a smi, but it is
- // already marked as not known to be one.
- __ RecordWrite(scratch0(), offset, accumulator0(), scratch1());
- } else {
- // Copy the value to the other accumulator to preserve a copy from the
- // write barrier. One of the accumulators is available as a scratch
- // register. Neither is a smi.
- __ movq(accumulator1(), accumulator0());
- clear_as_smi(accumulator1());
- Register value_scratch = other_accumulator(destination());
- __ RecordWrite(scratch0(), offset, value_scratch, scratch1());
- }
- } else if (destination().is(accumulator1())) {
- __ movq(accumulator1(), accumulator0());
- // Is a smi because we do not need the write barrier.
- set_as_smi(accumulator1());
- }
-}
-
-
-void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
- ASSERT(!destination().is(no_reg));
- LookupResult lookup;
- info()->receiver()->Lookup(*name, &lookup);
-
- ASSERT(lookup.holder() == *info()->receiver());
- ASSERT(lookup.type() == FIELD);
- Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
- int index = lookup.GetFieldIndex() - map->inobject_properties();
- int offset = index * kPointerSize;
-
- // Perform the load. Negative offsets are inobject properties.
- if (offset < 0) {
- offset += map->instance_size();
- __ movq(destination(), FieldOperand(receiver_reg(), offset));
- } else {
- offset += FixedArray::kHeaderSize;
- __ movq(scratch0(),
- FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
- __ movq(destination(), FieldOperand(scratch0(), offset));
- }
-
- // The loaded value is not known to be a smi.
- clear_as_smi(destination());
-}
-
-
-void FastCodeGenerator::EmitBitOr() {
- if (is_smi(accumulator0()) && is_smi(accumulator1())) {
- // If both operands are known to be a smi then there is no need to check
- // the operands or result.
- if (destination().is(no_reg)) {
- __ or_(accumulator1(), accumulator0());
- } else {
- // Leave the result in the destination register. Bitwise or is
- // commutative.
- __ or_(destination(), other_accumulator(destination()));
- }
- } else {
- // Left is in accumulator1, right in accumulator0.
- if (destination().is(accumulator0())) {
- __ movq(scratch0(), accumulator0());
- __ or_(destination(), accumulator1()); // Or is commutative.
- Label* bailout =
- info()->AddBailout(accumulator1(), scratch0()); // Left, right.
- __ JumpIfNotSmi(destination(), bailout);
- } else if (destination().is(accumulator1())) {
- __ movq(scratch0(), accumulator1());
- __ or_(destination(), accumulator0());
- Label* bailout = info()->AddBailout(scratch0(), accumulator0());
- __ JumpIfNotSmi(destination(), bailout);
- } else {
- ASSERT(destination().is(no_reg));
- __ movq(scratch0(), accumulator1());
- __ or_(scratch0(), accumulator0());
- Label* bailout = info()->AddBailout(accumulator1(), accumulator0());
- __ JumpIfNotSmi(scratch0(), bailout);
- }
- }
-
- // If we didn't bailout, the result (in fact, both inputs too) is known to
- // be a smi.
- set_as_smi(accumulator0());
- set_as_smi(accumulator1());
-}
-
-
-void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
- ASSERT(info_ == NULL);
- info_ = compilation_info;
- Comment cmnt(masm_, "[ function compiled by fast code generator");
-
- // Save the caller's frame pointer and set up our own.
- Comment prologue_cmnt(masm(), ";; Prologue");
- __ push(rbp);
- __ movq(rbp, rsp);
- __ push(rsi); // Context.
- __ push(rdi); // Closure.
- // Note that we keep a live register reference to esi (context) at this
- // point.
-
- Label* bailout_to_beginning = info()->AddBailout();
- // Receiver (this) is allocated to a fixed register.
- if (info()->has_this_properties()) {
- Comment cmnt(masm(), ";; MapCheck(this)");
- if (FLAG_print_ir) {
- PrintF("MapCheck(this)\n");
- }
- ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
- Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
- Handle<Map> map(object->map());
- EmitLoadReceiver();
- __ CheckMap(receiver_reg(), map, bailout_to_beginning, false);
- }
-
- // If there is a global variable access check if the global object is the
- // same as at lazy-compilation time.
- if (info()->has_globals()) {
- Comment cmnt(masm(), ";; MapCheck(GLOBAL)");
- if (FLAG_print_ir) {
- PrintF("MapCheck(GLOBAL)\n");
- }
- ASSERT(info()->has_global_object());
- Handle<Map> map(info()->global_object()->map());
- __ movq(scratch0(), CodeGenerator::GlobalObject());
- __ CheckMap(scratch0(), map, bailout_to_beginning, true);
- }
-
- VisitStatements(info()->function()->body());
-
- Comment return_cmnt(masm(), ";; Return(<undefined>)");
- if (FLAG_print_ir) {
- PrintF("Return(<undefined>)\n");
- }
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ movq(rsp, rbp);
- __ pop(rbp);
- __ ret((scope()->num_parameters() + 1) * kPointerSize);
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 4d74735eb..725cbb0c5 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -54,100 +54,98 @@ namespace internal {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-x64.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
- if (mode == PRIMARY) {
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
-
- { Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = scope()->num_stack_slots();
- if (locals_count == 1) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- } else if (locals_count > 1) {
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < locals_count; i++) {
- __ push(rdx);
- }
+ __ push(rbp); // Caller's frame pointer.
+ __ movq(rbp, rsp);
+ __ push(rsi); // Callee's context.
+ __ push(rdi); // Callee's JS Function.
+
+ { Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = scope()->num_stack_slots();
+ if (locals_count == 1) {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ } else if (locals_count > 1) {
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ for (int i = 0; i < locals_count; i++) {
+ __ push(rdx);
}
}
+ }
- bool function_in_register = true;
+ bool function_in_register = true;
- // Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is still in rdi.
- __ push(rdi);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewContext, 1);
- }
- function_in_register = false;
- // Context is returned in both rax and rsi. It replaces the context
- // passed to us. It's saved in the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
-
- // Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = scope()->parameter(i)->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(slot->index());
- __ movq(Operand(rsi, context_offset), rax);
- // Update the write barrier. This clobbers all involved
- // registers, so we have use a third register to avoid
- // clobbering rsi.
- __ movq(rcx, rsi);
- __ RecordWrite(rcx, context_offset, rax, rbx);
- }
+ // Possibly allocate a local context.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is still in rdi.
+ __ push(rdi);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewContext, 1);
+ }
+ function_in_register = false;
+ // Context is returned in both rax and rsi. It replaces the context
+ // passed to us. It's saved in the stack and kept live in rsi.
+ __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ movq(rax, Operand(rbp, parameter_offset));
+ // Store it in the context.
+ int context_offset = Context::SlotOffset(slot->index());
+ __ movq(Operand(rsi, context_offset), rax);
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use a third register to avoid
+ // clobbering rsi.
+ __ movq(rcx, rsi);
+ __ RecordWrite(rcx, context_offset, rax, rbx);
}
}
+ }
- // Possibly allocate an arguments object.
- Variable* arguments = scope()->arguments()->AsVariable();
- if (arguments != NULL) {
- // Arguments object must be allocated after the context object, in
- // case the "arguments" or ".arguments" variables are in the context.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (function_in_register) {
- __ push(rdi);
- } else {
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- }
- // The receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
- __ lea(rdx,
- Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(rdx);
- __ Push(Smi::FromInt(scope()->num_parameters()));
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ CallStub(&stub);
- // Store new arguments object in both "arguments" and ".arguments" slots.
- __ movq(rcx, rax);
- Move(arguments->slot(), rax, rbx, rdx);
- Slot* dot_arguments_slot =
- scope()->arguments_shadow()->AsVariable()->slot();
- Move(dot_arguments_slot, rcx, rbx, rdx);
+ // Possibly allocate an arguments object.
+ Variable* arguments = scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Arguments object must be allocated after the context object, in
+ // case the "arguments" or ".arguments" variables are in the context.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (function_in_register) {
+ __ push(rdi);
+ } else {
+ __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
+ // The receiver is just before the parameters on the caller's stack.
+ int offset = scope()->num_parameters() * kPointerSize;
+ __ lea(rdx,
+ Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
+ __ push(rdx);
+ __ Push(Smi::FromInt(scope()->num_parameters()));
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ // Store new arguments object in both "arguments" and ".arguments" slots.
+ __ movq(rcx, rax);
+ Move(arguments->slot(), rax, rbx, rdx);
+ Slot* dot_arguments_slot =
+ scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, rcx, rbx, rdx);
}
{ Comment cmnt(masm_, "[ Declarations");
@@ -1053,7 +1051,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(rcx); // Enumerable.
__ push(rbx); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ SmiCompare(rax, Smi::FromInt(0));
__ j(equal, loop_statement.continue_target());
__ movq(rbx, rax);
@@ -2059,6 +2057,25 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
+ // used in a few functions in runtime.js, which should not normally be hit
+ // by this compiler.
+ __ jmp(if_false);
+ Apply(context_, if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
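
The full code generator punts here, while the classic back end (see the
DeferredIsStringWrapperSafeForDefaultValueOf hunk in codegen-x64.cc above)
implements the real check: consult a cached bit in the map, otherwise scan the
descriptor array for a valueOf symbol and verify the prototype is the untouched
String prototype, then cache a positive answer. The predicate, modeled in plain
C++ with illustrative field names:

    struct StringWrapper {
      bool map_bit_cached_safe;   // Map::kStringWrapperSafeForDefaultValueOf
      bool has_own_value_of;      // 'valueOf' found in the descriptor array?
      bool prototype_unmodified;  // prototype is the original String.prototype?
    };

    bool SafeForDefaultValueOf(StringWrapper& w) {
      if (w.map_bit_cached_safe) return true;  // fast path: cached map bit
      bool safe = !w.has_own_value_of && w.prototype_unmodified;
      if (safe) w.map_bit_cached_safe = true;  // the deferred code caches a hit
      return safe;
    }

    int main() {
      StringWrapper w = { false, false, true };
      return SafeForDefaultValueOf(w) && w.map_bit_cached_safe ? 0 : 1;
    }
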
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index bab019935..c1954a898 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -582,8 +582,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
// Make sure the code objects in the builtins object and in the
// builtin function are the same.
push(target);
- movq(target, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- movq(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
+ movq(target, FieldOperand(rdi, JSFunction::kCodeOffset));
cmpq(target, Operand(rsp, 0));
Assert(equal, "Builtin code object changed");
pop(target);
@@ -783,8 +782,8 @@ void MacroAssembler::SmiCompare(Register dst, Smi* src) {
if (src->value() == 0) {
testq(dst, dst);
} else {
- Move(kScratchRegister, src);
- cmpq(dst, kScratchRegister);
+ Register constant_reg = GetSmiConstant(src);
+ cmpq(dst, constant_reg);
}
}
@@ -1978,10 +1977,17 @@ void MacroAssembler::AbortIfNotNumber(Register object) {
}
+void MacroAssembler::AbortIfSmi(Register object) {
+ Condition is_smi = CheckSmi(object);
+ Assert(NegateCondition(is_smi), "Operand is a smi");
+}
+
+
void MacroAssembler::AbortIfNotSmi(Register object) {
Label ok;
Condition is_smi = CheckSmi(object);
- Assert(is_smi, "Operand not a smi");
+ Assert(is_smi, "Operand is not a smi");
}
@@ -2290,7 +2296,7 @@ void MacroAssembler::InvokeFunction(Register function,
movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
movsxlq(rbx,
FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
- movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ movq(rdx, FieldOperand(rdi, JSFunction::kCodeOffset));
// Advances rdx to the end of the Code object header, to the start of
// the executable code.
lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
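
The lea through FieldOperand(rdx, Code::kHeaderSize) is the recurring idiom in
this diff for turning a tagged Code pointer into a jump target: FieldOperand
subtracts the heap-object tag, and the header size skips the Code object's
metadata to reach its first instruction. In numbers (the tag value is V8's
kHeapObjectTag; the header size below is a placeholder, not the real constant):

    #include <cstdint>

    constexpr intptr_t kHeapObjectTag = 1;    // heap pointers carry tag bit 1
    constexpr intptr_t kCodeHeaderSize = 64;  // placeholder for Code::kHeaderSize

    uintptr_t EntryPoint(uintptr_t tagged_code_ptr) {
      // Matches lea(reg, FieldOperand(reg, Code::kHeaderSize)).
      return tagged_code_ptr - kHeapObjectTag + kCodeHeaderSize;
    }

    int main() {
      return EntryPoint(0x1001) == 0x1000 + kCodeHeaderSize ? 0 : 1;
    }
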
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index a294ad678..7083224bd 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -582,6 +582,9 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a number. Used in debug code.
void AbortIfNotNumber(Register object);
+ // Abort execution if argument is a smi. Used in debug code.
+ void AbortIfSmi(Register object);
+
// Abort execution if argument is not a smi. Used in debug code.
void AbortIfNotSmi(Register object);
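
AbortIfSmi mirrors its sibling and is likewise intended for FLAG_debug_code
paths; the call site added in GenerateIsStringWrapperSafeForDefaultValueOf
earlier in this diff shows the pattern:

    if (FLAG_debug_code) {
      __ AbortIfSmi(obj.reg());  // the string wrapper must be a heap object
    }
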
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 4c15715c2..7aaeab793 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -2039,30 +2039,6 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
}
-// TODO(1241006): Avoid having lazy compile stubs specialized by the
-// number of arguments. It is not needed anymore.
-Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
-
- __ push(rdi); // function is also the parameter to the runtime call
- __ CallRuntime(Runtime::kLazyCompile, 1);
- __ pop(rdi);
-
- // Tear down temporary frame.
- __ LeaveInternalFrame();
-
- // Do a tail-call of the compiled function.
- __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rcx);
-
- return GetCodeWithFlags(flags, "LazyCompileStub");
-}
-
-
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
JSObject* interceptor_holder,
LookupResult* lookup,
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 82b93c95c..8bfa51c60 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -81,12 +81,23 @@ static void ExpectTrue(const char* code) {
}
+static void ExpectFalse(const char* code) {
+ ExpectBoolean(code, false);
+}
+
+
static void ExpectObject(const char* code, Local<Value> expected) {
Local<Value> result = CompileRun(code);
CHECK(result->Equals(expected));
}
+static void ExpectUndefined(const char* code) {
+ Local<Value> result = CompileRun(code);
+ CHECK(result->IsUndefined());
+}
+
+
static int signature_callback_count;
static v8::Handle<Value> IncrementingSignatureCallback(
const v8::Arguments& args) {
@@ -1194,12 +1205,12 @@ v8::Handle<Value> CheckThisNamedPropertySetter(Local<String> property,
return v8::Handle<Value>();
}
-v8::Handle<v8::Boolean> CheckThisIndexedPropertyQuery(
+v8::Handle<v8::Integer> CheckThisIndexedPropertyQuery(
uint32_t index,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
CHECK(info.This()->Equals(bottom));
- return v8::Handle<v8::Boolean>();
+ return v8::Handle<v8::Integer>();
}
@@ -11187,3 +11198,89 @@ THREADED_TEST(TwoByteStringInAsciiCons) {
reresult = CompileRun("str2.charCodeAt(2);");
CHECK_EQ(static_cast<int32_t>('e'), reresult->Int32Value());
}
+
+
+// Failed access check callback that performs a GC on each invocation.
+void FailedAccessCheckCallbackGC(Local<v8::Object> target,
+ v8::AccessType type,
+ Local<v8::Value> data) {
+ i::Heap::CollectAllGarbage(true);
+}
+
+
+TEST(GCInFailedAccessCheckCallback) {
+ // Install a failed access check callback that performs a GC on each
+ // invocation. Then force the callback to be called from a variety of
+ // access operations on an object from a different security context.
+
+ v8::V8::Initialize();
+ v8::V8::SetFailedAccessCheckCallbackFunction(&FailedAccessCheckCallbackGC);
+
+ v8::HandleScope scope;
+
+ // Create an ObjectTemplate for global objects and install access
+ // check callbacks that will block access.
+ v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ global_template->SetAccessCheckCallbacks(NamedGetAccessBlocker,
+ IndexedGetAccessBlocker,
+ v8::Handle<v8::Value>(),
+ false);
+
+ // Create a context and set an x property on its global object.
+ LocalContext context0(NULL, global_template);
+ context0->Global()->Set(v8_str("x"), v8_num(42));
+ v8::Handle<v8::Object> global0 = context0->Global();
+
+ // Create a context with a different security token so that the
+ // failed access check callback will be called on each access.
+ LocalContext context1(NULL, global_template);
+ context1->Global()->Set(v8_str("other"), global0);
+
+ // Get property with failed access check.
+ ExpectUndefined("other.x");
+
+ // Get element with failed access check.
+ ExpectUndefined("other[0]");
+
+ // Set property with failed access check.
+ v8::Handle<v8::Value> result = CompileRun("other.x = new Object()");
+ CHECK(result->IsObject());
+
+ // Set element with failed access check.
+ result = CompileRun("other[0] = new Object()");
+ CHECK(result->IsObject());
+
+ // Get property attribute with failed access check.
+ ExpectFalse("\'x\' in other");
+
+ // Get property attribute for element with failed access check.
+ ExpectFalse("0 in other");
+
+ // Delete property.
+ ExpectFalse("delete other.x");
+
+ // Delete element.
+ CHECK_EQ(false, global0->Delete(0));
+
+ // DefineAccessor.
+ CHECK_EQ(false,
+ global0->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("x")));
+
+ // Define JavaScript accessor.
+ ExpectUndefined("Object.prototype.__defineGetter__.call("
+ " other, \'x\', function() { return 42; })");
+
+ // LookupAccessor.
+ ExpectUndefined("Object.prototype.__lookupGetter__.call("
+ " other, \'x\')");
+
+ // HasLocalElement.
+ ExpectFalse("Object.prototype.hasOwnProperty.call(other, \'0\')");
+
+ CHECK_EQ(false, global0->HasRealIndexedProperty(0));
+ CHECK_EQ(false, global0->HasRealNamedProperty(v8_str("x")));
+ CHECK_EQ(false, global0->HasRealNamedCallbackProperty(v8_str("x")));
+
+ // Reset the failed access check callback so it does not influence
+ // the other tests.
+ v8::V8::SetFailedAccessCheckCallbackFunction(NULL);
+}
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 0e6f09d2e..239d8ae69 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -12,6 +12,7 @@ namespace i = v8::internal;
using i::CodeEntry;
using i::CpuProfile;
+using i::CpuProfiler;
using i::CpuProfilesCollection;
using i::ProfileGenerator;
using i::ProfileNode;
@@ -225,4 +226,18 @@ TEST(TickEvents) {
CHECK_EQ("bbb", bottom_up_ddd_stub_children->last()->entry()->name());
}
+
+// http://crbug/51594
+// This test must not crash.
+TEST(CrashIfStoppingLastNonExistentProfile) {
+ InitializeVM();
+ TestSetup test_setup;
+ CpuProfiler::Setup();
+ CpuProfiler::StartProfiling("1");
+ CpuProfiler::StopProfiling("2");
+ CpuProfiler::StartProfiling("1");
+ CpuProfiler::StopProfiling("");
+ CpuProfiler::TearDown();
+}
+
#endif // ENABLE_LOGGING_AND_PROFILING
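
The regression test pins down two invariants: stopping a profile by an unknown
title must be a harmless no-op rather than a crash (the crbug above), and an
empty title is treated here as "stop the most recent profile". A loose
standalone model of those invariants; V8's actual CpuProfiler title semantics
may differ:

    #include <string>
    #include <vector>

    struct Profiler {
      std::vector<std::string> active;
      void Start(const std::string& title) { active.push_back(title); }
      void Stop(const std::string& title) {
        if (active.empty()) return;  // nothing running: no-op, never a crash
        if (title.empty()) { active.pop_back(); return; }  // "" = most recent
        for (size_t i = active.size(); i-- > 0; ) {
          if (active[i] == title) { active.erase(active.begin() + i); return; }
        }
        // Unknown title: also a no-op. The regression was a crash here.
      }
    };

    int main() {
      Profiler p;
      p.Start("1");
      p.Stop("2");   // unknown title: must not crash
      p.Start("1");
      p.Stop("");    // stops the most recent profile
      return p.active.size() == 1 ? 0 : 1;
    }
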
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 40fadd8ef..25d2ec03a 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -194,6 +194,8 @@ TEST(DisasmIa320) {
__ rcl(edx, 1);
__ rcl(edx, 7);
+ __ rcr(edx, 1);
+ __ rcr(edx, 7);
__ sar(edx, 1);
__ sar(edx, 6);
__ sar_cl(edx);
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 1819aa461..92ad0a400 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -396,20 +396,17 @@ class NamedEntriesDetector {
has_A2(false), has_B2(false), has_C2(false) {
}
- void Apply(i::HeapEntry* entry) {
- const char* node_name = entry->name();
- if (strcmp("A1", node_name) == 0
- && entry->GetRetainingPaths()->length() > 0) has_A1 = true;
- if (strcmp("B1", node_name) == 0
- && entry->GetRetainingPaths()->length() > 0) has_B1 = true;
- if (strcmp("C1", node_name) == 0
- && entry->GetRetainingPaths()->length() > 0) has_C1 = true;
- if (strcmp("A2", node_name) == 0
- && entry->GetRetainingPaths()->length() > 0) has_A2 = true;
- if (strcmp("B2", node_name) == 0
- && entry->GetRetainingPaths()->length() > 0) has_B2 = true;
- if (strcmp("C2", node_name) == 0
- && entry->GetRetainingPaths()->length() > 0) has_C2 = true;
+ void Apply(i::HeapEntry** entry_ptr) {
+ if (IsReachableNodeWithName(*entry_ptr, "A1")) has_A1 = true;
+ if (IsReachableNodeWithName(*entry_ptr, "B1")) has_B1 = true;
+ if (IsReachableNodeWithName(*entry_ptr, "C1")) has_C1 = true;
+ if (IsReachableNodeWithName(*entry_ptr, "A2")) has_A2 = true;
+ if (IsReachableNodeWithName(*entry_ptr, "B2")) has_B2 = true;
+ if (IsReachableNodeWithName(*entry_ptr, "C2")) has_C2 = true;
+ }
+
+ static bool IsReachableNodeWithName(i::HeapEntry* entry, const char* name) {
+ return strcmp(name, entry->name()) == 0 && entry->painted_reachable();
}
bool has_A1;
@@ -460,7 +457,7 @@ static bool HasString(const v8::HeapGraphNode* node, const char* contents) {
for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = node->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
- if (node->GetType() == v8::HeapGraphNode::STRING) {
+ if (node->GetType() == v8::HeapGraphNode::kString) {
v8::String::AsciiValue node_name(node->GetName());
if (strcmp(contents, *node_name) == 0) return true;
}
@@ -496,26 +493,34 @@ TEST(HeapSnapshot) {
"var c2 = new C2(a2);");
const v8::HeapSnapshot* snapshot_env2 =
v8::HeapProfiler::TakeSnapshot(v8::String::New("env2"));
+ i::HeapSnapshot* i_snapshot_env2 =
+ const_cast<i::HeapSnapshot*>(
+ reinterpret_cast<const i::HeapSnapshot*>(snapshot_env2));
const v8::HeapGraphNode* global_env2 = GetGlobalObject(snapshot_env2);
+ // Paint all nodes reachable from global object.
+ i_snapshot_env2->ClearPaint();
+ const_cast<i::HeapEntry*>(
+ reinterpret_cast<const i::HeapEntry*>(global_env2))->PaintAllReachable();
// Verify, that JS global object of env2 doesn't have '..1'
// properties, but has '..2' properties.
- CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "a1"));
- CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b1_1"));
- CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b1_2"));
- CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "c1"));
+ CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "a1"));
+ CHECK_EQ(
+ NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "b1_1"));
+ CHECK_EQ(
+ NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "b1_2"));
+ CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "c1"));
const v8::HeapGraphNode* a2_node =
- GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "a2");
+ GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "a2");
CHECK_NE(NULL, a2_node);
- CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b2_1"));
- CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b2_2"));
- CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "c2"));
+ CHECK_NE(
+ NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "b2_1"));
+ CHECK_NE(
+ NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "b2_2"));
+ CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "c2"));
// Verify that anything related to '[ABC]1' is not reachable.
NamedEntriesDetector det;
- i::HeapSnapshot* i_snapshot_env2 =
- const_cast<i::HeapSnapshot*>(
- reinterpret_cast<const i::HeapSnapshot*>(snapshot_env2));
i_snapshot_env2->IterateEntries(&det);
CHECK(!det.has_A1);
CHECK(!det.has_B1);
@@ -539,7 +544,7 @@ TEST(HeapSnapshot) {
const v8::HeapGraphEdge* last_edge = path->GetEdge(edges_count - 1);
v8::String::AsciiValue last_edge_name(last_edge->GetName());
if (strcmp("a2", *last_edge_name) == 0
- && last_edge->GetType() == v8::HeapGraphEdge::PROPERTY) {
+ && last_edge->GetType() == v8::HeapGraphEdge::kProperty) {
has_global_obj_a2_ref = true;
continue;
}
@@ -547,19 +552,19 @@ TEST(HeapSnapshot) {
const v8::HeapGraphEdge* prev_edge = path->GetEdge(edges_count - 2);
v8::String::AsciiValue prev_edge_name(prev_edge->GetName());
if (strcmp("x1", *last_edge_name) == 0
- && last_edge->GetType() == v8::HeapGraphEdge::PROPERTY
+ && last_edge->GetType() == v8::HeapGraphEdge::kProperty
&& strcmp("c2", *prev_edge_name) == 0) has_c2_x1_ref = true;
if (strcmp("x2", *last_edge_name) == 0
- && last_edge->GetType() == v8::HeapGraphEdge::PROPERTY
+ && last_edge->GetType() == v8::HeapGraphEdge::kProperty
&& strcmp("c2", *prev_edge_name) == 0) has_c2_x2_ref = true;
if (strcmp("1", *last_edge_name) == 0
- && last_edge->GetType() == v8::HeapGraphEdge::ELEMENT
+ && last_edge->GetType() == v8::HeapGraphEdge::kElement
&& strcmp("c2", *prev_edge_name) == 0) has_c2_1_ref = true;
if (strcmp("x", *last_edge_name) == 0
- && last_edge->GetType() == v8::HeapGraphEdge::CONTEXT_VARIABLE
+ && last_edge->GetType() == v8::HeapGraphEdge::kContextVariable
&& strcmp("b2_1", *prev_edge_name) == 0) has_b2_1_x_ref = true;
if (strcmp("x", *last_edge_name) == 0
- && last_edge->GetType() == v8::HeapGraphEdge::CONTEXT_VARIABLE
+ && last_edge->GetType() == v8::HeapGraphEdge::kContextVariable
&& strcmp("b2_2", *prev_edge_name) == 0) has_b2_2_x_ref = true;
}
CHECK(has_global_obj_a2_ref);
@@ -571,6 +576,73 @@ TEST(HeapSnapshot) {
}
+TEST(HeapSnapshotObjectSizes) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ // -a-> X1 --a
+ // x -b-> X2 <-|
+ CompileAndRunScript(
+ "function X(a, b) { this.a = a; this.b = b; }\n"
+ "x = new X(new X(), new X());\n"
+ "x.a.a = x.b;");
+ const v8::HeapSnapshot* snapshot =
+ v8::HeapProfiler::TakeSnapshot(v8::String::New("sizes"));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* x =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "x");
+ CHECK_NE(NULL, x);
+ const v8::HeapGraphNode* x_prototype =
+ GetProperty(x, v8::HeapGraphEdge::kProperty, "prototype");
+ CHECK_NE(NULL, x_prototype);
+ const v8::HeapGraphNode* x1 =
+ GetProperty(x, v8::HeapGraphEdge::kProperty, "a");
+ CHECK_NE(NULL, x1);
+ const v8::HeapGraphNode* x2 =
+ GetProperty(x, v8::HeapGraphEdge::kProperty, "b");
+ CHECK_NE(NULL, x2);
+ CHECK_EQ(
+ x->GetSelfSize() * 3,
+ x->GetReachableSize() - x_prototype->GetReachableSize());
+ CHECK_EQ(
+ x->GetSelfSize() * 3 + x_prototype->GetSelfSize(), x->GetRetainedSize());
+ CHECK_EQ(
+ x1->GetSelfSize() * 2,
+ x1->GetReachableSize() - x_prototype->GetReachableSize());
+ CHECK_EQ(
+ x1->GetSelfSize(), x1->GetRetainedSize());
+ CHECK_EQ(
+ x2->GetSelfSize(),
+ x2->GetReachableSize() - x_prototype->GetReachableSize());
+ CHECK_EQ(
+ x2->GetSelfSize(), x2->GetRetainedSize());
+}
+
+
+TEST(HeapSnapshotEntryChildren) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ CompileAndRunScript(
+ "function A() { }\n"
+ "a = new A;");
+ const v8::HeapSnapshot* snapshot =
+ v8::HeapProfiler::TakeSnapshot(v8::String::New("children"));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ for (int i = 0, count = global->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = global->GetChild(i);
+ CHECK_EQ(global, prop->GetFromNode());
+ }
+ const v8::HeapGraphNode* a =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "a");
+ CHECK_NE(NULL, a);
+ for (int i = 0, count = a->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = a->GetChild(i);
+ CHECK_EQ(a, prop->GetFromNode());
+ }
+}
+
+
TEST(HeapSnapshotCodeObjects) {
v8::HandleScope scope;
LocalContext env;
@@ -584,20 +656,20 @@ TEST(HeapSnapshotCodeObjects) {
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* compiled =
- GetProperty(global, v8::HeapGraphEdge::PROPERTY, "compiled");
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "compiled");
CHECK_NE(NULL, compiled);
- CHECK_EQ(v8::HeapGraphNode::CLOSURE, compiled->GetType());
+ CHECK_EQ(v8::HeapGraphNode::kClosure, compiled->GetType());
const v8::HeapGraphNode* lazy =
- GetProperty(global, v8::HeapGraphEdge::PROPERTY, "lazy");
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "lazy");
CHECK_NE(NULL, lazy);
- CHECK_EQ(v8::HeapGraphNode::CLOSURE, lazy->GetType());
+ CHECK_EQ(v8::HeapGraphNode::kClosure, lazy->GetType());
// Find references to code.
const v8::HeapGraphNode* compiled_code =
- GetProperty(compiled, v8::HeapGraphEdge::INTERNAL, "code");
+ GetProperty(compiled, v8::HeapGraphEdge::kInternal, "code");
CHECK_NE(NULL, compiled_code);
const v8::HeapGraphNode* lazy_code =
- GetProperty(lazy, v8::HeapGraphEdge::INTERNAL, "code");
+ GetProperty(lazy, v8::HeapGraphEdge::kInternal, "code");
CHECK_NE(NULL, lazy_code);
// Verify that non-compiled code doesn't contain references to "x"
@@ -607,7 +679,7 @@ TEST(HeapSnapshotCodeObjects) {
for (int i = 0, count = compiled_code->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = compiled_code->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
- if (node->GetType() == v8::HeapGraphNode::ARRAY) {
+ if (node->GetType() == v8::HeapGraphNode::kArray) {
if (HasString(node, "x")) {
compiled_references_x = true;
break;
@@ -617,7 +689,7 @@ TEST(HeapSnapshotCodeObjects) {
for (int i = 0, count = lazy_code->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = lazy_code->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
- if (node->GetType() == v8::HeapGraphNode::ARRAY) {
+ if (node->GetType() == v8::HeapGraphNode::kArray) {
if (HasString(node, "x")) {
lazy_references_x = true;
break;
@@ -634,11 +706,8 @@ TEST(HeapSnapshotCodeObjects) {
// them to a signed type.
#define CHECK_EQ_UINT64_T(a, b) \
CHECK_EQ(static_cast<int64_t>(a), static_cast<int64_t>(b))
-#define CHECK_NE_UINT64_T(a, b) do \
- { \
- bool ne = a != b; \
- CHECK(ne); \
- } while (false)
+#define CHECK_NE_UINT64_T(a, b) \
+ CHECK((a) != (b)) // NOLINT
TEST(HeapEntryIdsAndGC) {
v8::HandleScope scope;
@@ -662,27 +731,35 @@ TEST(HeapEntryIdsAndGC) {
CHECK_NE_UINT64_T(0, global1->GetId());
CHECK_EQ_UINT64_T(global1->GetId(), global2->GetId());
const v8::HeapGraphNode* A1 =
- GetProperty(global1, v8::HeapGraphEdge::PROPERTY, "A");
+ GetProperty(global1, v8::HeapGraphEdge::kProperty, "A");
+ CHECK_NE(NULL, A1);
const v8::HeapGraphNode* A2 =
- GetProperty(global2, v8::HeapGraphEdge::PROPERTY, "A");
+ GetProperty(global2, v8::HeapGraphEdge::kProperty, "A");
+ CHECK_NE(NULL, A2);
CHECK_NE_UINT64_T(0, A1->GetId());
CHECK_EQ_UINT64_T(A1->GetId(), A2->GetId());
const v8::HeapGraphNode* B1 =
- GetProperty(global1, v8::HeapGraphEdge::PROPERTY, "B");
+ GetProperty(global1, v8::HeapGraphEdge::kProperty, "B");
+ CHECK_NE(NULL, B1);
const v8::HeapGraphNode* B2 =
- GetProperty(global2, v8::HeapGraphEdge::PROPERTY, "B");
+ GetProperty(global2, v8::HeapGraphEdge::kProperty, "B");
+ CHECK_NE(NULL, B2);
CHECK_NE_UINT64_T(0, B1->GetId());
CHECK_EQ_UINT64_T(B1->GetId(), B2->GetId());
const v8::HeapGraphNode* a1 =
- GetProperty(global1, v8::HeapGraphEdge::PROPERTY, "a");
+ GetProperty(global1, v8::HeapGraphEdge::kProperty, "a");
+ CHECK_NE(NULL, a1);
const v8::HeapGraphNode* a2 =
- GetProperty(global2, v8::HeapGraphEdge::PROPERTY, "a");
+ GetProperty(global2, v8::HeapGraphEdge::kProperty, "a");
+ CHECK_NE(NULL, a2);
CHECK_NE_UINT64_T(0, a1->GetId());
CHECK_EQ_UINT64_T(a1->GetId(), a2->GetId());
const v8::HeapGraphNode* b1 =
- GetProperty(global1, v8::HeapGraphEdge::PROPERTY, "b");
+ GetProperty(global1, v8::HeapGraphEdge::kProperty, "b");
+ CHECK_NE(NULL, b1);
const v8::HeapGraphNode* b2 =
- GetProperty(global2, v8::HeapGraphEdge::PROPERTY, "b");
+ GetProperty(global2, v8::HeapGraphEdge::kProperty, "b");
+ CHECK_NE(NULL, b2);
CHECK_NE_UINT64_T(0, b1->GetId());
CHECK_EQ_UINT64_T(b1->GetId(), b2->GetId());
}
@@ -717,15 +794,15 @@ TEST(HeapSnapshotsDiff) {
for (int i = 0, count = additions_root->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = additions_root->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
- if (node->GetType() == v8::HeapGraphNode::OBJECT) {
+ if (node->GetType() == v8::HeapGraphNode::kObject) {
v8::String::AsciiValue node_name(node->GetName());
if (strcmp(*node_name, "A") == 0) {
- CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::PROPERTY, "a"));
+ CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::kProperty, "a"));
CHECK(!found_A);
found_A = true;
s1_A_id = node->GetId();
} else if (strcmp(*node_name, "B") == 0) {
- CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::PROPERTY, "b2"));
+ CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::kProperty, "b2"));
CHECK(!found_B);
found_B = true;
}
@@ -741,10 +818,10 @@ TEST(HeapSnapshotsDiff) {
for (int i = 0, count = deletions_root->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = deletions_root->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
- if (node->GetType() == v8::HeapGraphNode::OBJECT) {
+ if (node->GetType() == v8::HeapGraphNode::kObject) {
v8::String::AsciiValue node_name(node->GetName());
if (strcmp(*node_name, "A") == 0) {
- CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::PROPERTY, "a"));
+ CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::kProperty, "a"));
CHECK(!found_A_del);
found_A_del = true;
s2_A_id = node->GetId();
@@ -756,4 +833,35 @@ TEST(HeapSnapshotsDiff) {
CHECK(s1_A_id != s2_A_id);
}
+
+namespace v8 {
+namespace internal {
+
+class HeapSnapshotTester {
+ public:
+ static int CalculateNetworkSize(JSObject* obj) {
+ return HeapSnapshot::CalculateNetworkSize(obj);
+ }
+};
+
+} } // namespace v8::internal
+
+// http://code.google.com/p/v8/issues/detail?id=822
+// Trying to call CalculateNetworkSize on an object with elements set
+// to non-FixedArray may cause an assertion error in debug builds.
+TEST(Issue822) {
+ v8::HandleScope scope;
+ LocalContext context;
+ const int kElementCount = 260;
+ uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount));
+ i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(kElementCount,
+ pixel_data);
+ v8::Handle<v8::Object> obj = v8::Object::New();
+ // Set the elements to be the pixels.
+ obj->SetIndexedPropertiesToPixelData(pixel_data, kElementCount);
+ i::Handle<i::JSObject> jsobj = v8::Utils::OpenHandle(*obj);
+ // This call must not cause an assertion error in debug builds.
+ i::HeapSnapshotTester::CalculateNetworkSize(*jsobj);
+}
+
#endif // ENABLE_LOGGING_AND_PROFILING
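
Issue822 in one sentence: once indexed properties are backed by external pixel
data, the object's elements are no longer a FixedArray, and size accounting
that asserts otherwise dies in debug builds. A standalone model of the hazard,
with illustrative names and sizes:

    #include <cassert>
    #include <cstdint>

    enum class ElementsKind { kFixedArray, kExternalPixels };
    struct Backing { ElementsKind kind; int length; };

    int ApproximateSize(const Backing& b) {
      // Pre-fix code effectively asserted kind == kFixedArray, which trips
      // on pixel-backed objects; external backing stores must be skipped.
      if (b.kind != ElementsKind::kFixedArray) return 0;
      return b.length * static_cast<int>(sizeof(uintptr_t));
    }

    int main() {
      Backing pixels = { ElementsKind::kExternalPixels, 260 };
      assert(ApproximateSize(pixels) == 0);  // must not trip an assertion
      return 0;
    }
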
diff --git a/deps/v8/test/mjsunit/api-call-after-bypassed-exception.js b/deps/v8/test/mjsunit/api-call-after-bypassed-exception.js
index f77b5140c..4a1855881 100644
--- a/deps/v8/test/mjsunit/api-call-after-bypassed-exception.js
+++ b/deps/v8/test/mjsunit/api-call-after-bypassed-exception.js
@@ -1,29 +1,29 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This is a test of making an API call after an exception thrown in JavaScript
// has been bypassed by a return in the finally block.
diff --git a/deps/v8/test/mjsunit/bitops-info.js b/deps/v8/test/mjsunit/bitops-info.js
new file mode 100644
index 000000000..4660fdf96
--- /dev/null
+++ b/deps/v8/test/mjsunit/bitops-info.js
@@ -0,0 +1,77 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function non_int32() {
+ return 2600822924; // It's not a signed Int32.
+}
+
+function hidden_smi() {
+  return 46512102;  // It's a Smi.
+}
+
+function hidden_int32() {
+ return 1600822924; // It's a signed Int32.
+}
+
+
+function f() {
+ var x = non_int32(); // Not a constant.
+ var y = hidden_smi(); // Not a constant.
+ var z = hidden_int32();
+ assertEquals(46512102 & 2600822924, 46512102 & x, "1");
+ assertEquals(1600822924 & 2600822924, 1600822924 & x, "2");
+ assertEquals(2600822924 & 2600822924, 2600822924 & x, "3");
+ assertEquals(46512102 & 46512102, 46512102 & y, "4");
+ assertEquals(1600822924 & 46512102, 1600822924 & y, "5");
+ assertEquals(2600822924 & 46512102, 2600822924 & y, "6");
+ assertEquals(46512102 & 1600822924, 46512102 & z, "7");
+ assertEquals(1600822924 & 1600822924, 1600822924 & z, "8");
+ assertEquals(2600822924 & 1600822924, 2600822924 & z, "9");
+ assertEquals(46512102 & 2600822924, y & x, "10");
+ assertEquals(1600822924 & 2600822924, z & x, "11");
+
+ assertEquals(46512102 & 2600822924, x & 46512102, "1rev");
+ assertEquals(1600822924 & 2600822924, x & 1600822924, "2rev");
+ assertEquals(2600822924 & 2600822924, x & 2600822924, "3rev");
+ assertEquals(46512102 & 46512102, y & 46512102, "4rev");
+ assertEquals(1600822924 & 46512102, y & 1600822924, "5rev");
+ assertEquals(2600822924 & 46512102, y & 2600822924, "6rev");
+ assertEquals(46512102 & 1600822924, z & 46512102, "7rev");
+ assertEquals(1600822924 & 1600822924, z & 1600822924, "8rev");
+ assertEquals(2600822924 & 1600822924, z & 2600822924, "9rev");
+ assertEquals(46512102 & 2600822924, x & y, "10rev");
+ assertEquals(1600822924 & 2600822924, x & z, "11rev");
+
+ assertEquals(2600822924 & 2600822924, x & x, "xx");
+ assertEquals(y, y & y, "yy");
+ assertEquals(z, z & z, "zz");
+}
+
+
+for (var i = 0; i < 5; i++) {
+ f();
+}
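The new bitops-info.js test above pins down ToInt32 truncation of bitwise operands: 2600822924 does not fit in a signed 32-bit integer, so V8 must wrap it modulo 2^32 before computing the AND. A minimal standalone sketch of the same invariant, using plain console.assert rather than the mjsunit harness:

    // ToInt32 maps 2600822924 into [-2^31, 2^31): 2600822924 - 2^32 = -1694144372.
    var non_int32 = 2600822924;
    console.assert((non_int32 | 0) === -1694144372);
    // Both operands of & are wrapped the same way, so x & x yields the
    // wrapped value rather than the original (double-represented) number.
    console.assert((non_int32 & non_int32) === -1694144372);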
diff --git a/deps/v8/test/mjsunit/debug-clearbreakpointgroup.js b/deps/v8/test/mjsunit/debug-clearbreakpointgroup.js
index aad6c3aff..e6677f939 100644
--- a/deps/v8/test/mjsunit/debug-clearbreakpointgroup.js
+++ b/deps/v8/test/mjsunit/debug-clearbreakpointgroup.js
@@ -1,117 +1,117 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-var Debug = debug.Debug
-
-// Simple function which stores the last debug event.
-var listenerComplete = false;
-var exception = false;
-
-var base_request = '"seq":0,"type":"request","command":"clearbreakpointgroup"';
-var scriptId = null;
-
-function safeEval(code) {
- try {
- return eval('(' + code + ')');
- } catch (e) {
- assertEquals(void 0, e);
- return undefined;
- }
-}
-
-function testArguments(dcp, arguments, success) {
- var request = '{' + base_request + ',"arguments":' + arguments + '}'
- var json_response = dcp.processDebugJSONRequest(request);
- var response = safeEval(json_response);
- if (success) {
- assertTrue(response.success, json_response);
- } else {
- assertFalse(response.success, json_response);
- }
-}
-
-function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break) {
- // Get the debug command processor.
- var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
-
- // Clear breakpoint group 1.
- testArguments(dcp, '{"groupId":1}', true);
-
- // Indicate that all was processed.
- listenerComplete = true;
- } else if (event == Debug.DebugEvent.AfterCompile) {
- scriptId = event_data.script().id();
- assertEquals(source, event_data.script().source());
- }
- } catch (e) {
- exception = e
- };
-};
-
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-var source = 'function f(n) {\nreturn n+1;\n}\nfunction g() {return f(10);}' +
- '\nvar r = g(); g;';
-eval(source);
-
-assertNotNull(scriptId);
-
-var groupId1 = 1;
-var groupId2 = 2;
-// Set a break point and call to invoke the debug event listener.
-var bp1 = Debug.setScriptBreakPointById(scriptId, 1, null, null, groupId1);
-var bp2 = Debug.setScriptBreakPointById(scriptId, 1, null, null, groupId2);
-var bp3 = Debug.setScriptBreakPointById(scriptId, 1, null, null, null);
-var bp4 = Debug.setScriptBreakPointById(scriptId, 3, null, null, groupId1);
-var bp5 = Debug.setScriptBreakPointById(scriptId, 4, null, null, groupId2);
-
-assertEquals(5, Debug.scriptBreakPoints().length);
-
-// Call function 'g' from the compiled script to trigger breakpoint.
-g();
-
-// Make sure that the debug event listener vas invoked.
-assertTrue(listenerComplete,
- "listener did not run to completion: " + exception);
-
-var breakpoints = Debug.scriptBreakPoints();
-assertEquals(3, breakpoints.length);
-var breakpointNumbers = breakpoints.map(
- function(scriptBreakpoint) { return scriptBreakpoint.number(); },
- breakpointNumbers);
-
-// Check that all breakpoints from group 1 were deleted and all the
-// rest are preserved.
-assertEquals([bp2, bp3, bp5].sort(), breakpointNumbers.sort());
-
-assertFalse(exception, "exception in listener");
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+var Debug = debug.Debug
+
+// Simple function which stores the last debug event.
+var listenerComplete = false;
+var exception = false;
+
+var base_request = '"seq":0,"type":"request","command":"clearbreakpointgroup"';
+var scriptId = null;
+
+function safeEval(code) {
+ try {
+ return eval('(' + code + ')');
+ } catch (e) {
+ assertEquals(void 0, e);
+ return undefined;
+ }
+}
+
+function testArguments(dcp, arguments, success) {
+ var request = '{' + base_request + ',"arguments":' + arguments + '}'
+ var json_response = dcp.processDebugJSONRequest(request);
+ var response = safeEval(json_response);
+ if (success) {
+ assertTrue(response.success, json_response);
+ } else {
+ assertFalse(response.success, json_response);
+ }
+}
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ // Get the debug command processor.
+ var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
+ // Clear breakpoint group 1.
+ testArguments(dcp, '{"groupId":1}', true);
+
+ // Indicate that all was processed.
+ listenerComplete = true;
+ } else if (event == Debug.DebugEvent.AfterCompile) {
+ scriptId = event_data.script().id();
+ assertEquals(source, event_data.script().source());
+ }
+ } catch (e) {
+ exception = e
+ };
+};
+
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+var source = 'function f(n) {\nreturn n+1;\n}\nfunction g() {return f(10);}' +
+ '\nvar r = g(); g;';
+eval(source);
+
+assertNotNull(scriptId);
+
+var groupId1 = 1;
+var groupId2 = 2;
+// Set a break point and call to invoke the debug event listener.
+var bp1 = Debug.setScriptBreakPointById(scriptId, 1, null, null, groupId1);
+var bp2 = Debug.setScriptBreakPointById(scriptId, 1, null, null, groupId2);
+var bp3 = Debug.setScriptBreakPointById(scriptId, 1, null, null, null);
+var bp4 = Debug.setScriptBreakPointById(scriptId, 3, null, null, groupId1);
+var bp5 = Debug.setScriptBreakPointById(scriptId, 4, null, null, groupId2);
+
+assertEquals(5, Debug.scriptBreakPoints().length);
+
+// Call function 'g' from the compiled script to trigger breakpoint.
+g();
+
+// Make sure that the debug event listener was invoked.
+assertTrue(listenerComplete,
+ "listener did not run to completion: " + exception);
+
+var breakpoints = Debug.scriptBreakPoints();
+assertEquals(3, breakpoints.length);
+var breakpointNumbers = breakpoints.map(
+    function(scriptBreakpoint) { return scriptBreakpoint.number(); });
+
+// Check that all breakpoints from group 1 were deleted and all the
+// rest are preserved.
+assertEquals([bp2, bp3, bp5].sort(), breakpointNumbers.sort());
+
+assertFalse(exception, "exception in listener");
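For reference, the complete JSON request that testArguments assembles for the legacy V8 debugger protocol looks like this (a sketch; the field values are the ones the test sends):

    var request = JSON.stringify({
      seq: 0,
      type: 'request',
      command: 'clearbreakpointgroup',
      arguments: { groupId: 1 }  // clears every breakpoint registered under group 1
    });
    // dcp.processDebugJSONRequest(request) returns a JSON string whose parsed
    // 'success' field the test asserts on.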
diff --git a/deps/v8/test/mjsunit/debug-evaluate-bool-constructor.js b/deps/v8/test/mjsunit/debug-evaluate-bool-constructor.js
index 809a5ccc6..d26809170 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-bool-constructor.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-bool-constructor.js
@@ -1,80 +1,80 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-var listenerComplete = false;
-var exception = false;
-
-function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break) {
- // Get the debug command processor.
- var dcp = exec_state.debugCommandProcessor();
-
- var request = {
- seq: 0,
- type: 'request',
- command: 'evaluate',
- arguments: {
- expression: 'a',
- frame: 0
- }
- };
- request = JSON.stringify(request);
-
- var resp = dcp.processDebugJSONRequest(request);
- var response = JSON.parse(resp);
- assertTrue(response.success, 'Command failed: ' + resp);
- assertEquals('object', response.body.type);
- assertEquals('Object', response.body.className);
-
- // Indicate that all was processed.
- listenerComplete = true;
- }
- } catch (e) {
- exception = e
- };
-};
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-function callDebugger() {
- // Add set constructor field to a non-function value.
- var a = {constructor:true};
- debugger;
-}
-
-callDebugger();
-
-
-// Make sure that the debug event listener vas invoked.
-assertFalse(exception, "exception in listener")
-assertTrue(listenerComplete, "listener did not run to completion");
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var listenerComplete = false;
+var exception = false;
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ // Get the debug command processor.
+ var dcp = exec_state.debugCommandProcessor();
+
+ var request = {
+ seq: 0,
+ type: 'request',
+ command: 'evaluate',
+ arguments: {
+ expression: 'a',
+ frame: 0
+ }
+ };
+ request = JSON.stringify(request);
+
+ var resp = dcp.processDebugJSONRequest(request);
+ var response = JSON.parse(resp);
+ assertTrue(response.success, 'Command failed: ' + resp);
+ assertEquals('object', response.body.type);
+ assertEquals('Object', response.body.className);
+
+ // Indicate that all was processed.
+ listenerComplete = true;
+ }
+ } catch (e) {
+ exception = e
+ };
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function callDebugger() {
+  // Set the 'constructor' field to a non-function value.
+ var a = {constructor:true};
+ debugger;
+}
+
+callDebugger();
+
+
+// Make sure that the debug event listener was invoked.
+assertFalse(exception, "exception in listener")
+assertTrue(listenerComplete, "listener did not run to completion");
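The point of evaluating an object whose own 'constructor' property is the boolean true: any mirror code that naively consults obj.constructor to derive a class name would misbehave here, so the test checks that the evaluate response still reports className 'Object'. A quick illustration in plain JavaScript:

    var a = { constructor: true };
    // The own property shadows Object.prototype.constructor:
    console.log(typeof a.constructor);               // 'boolean', not 'function'
    // Classification that is robust against the shadowing property:
    console.log(Object.prototype.toString.call(a));  // '[object Object]'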
diff --git a/deps/v8/test/mjsunit/debug-references.js b/deps/v8/test/mjsunit/debug-references.js
index 452761cf1..ab6c6292e 100644
--- a/deps/v8/test/mjsunit/debug-references.js
+++ b/deps/v8/test/mjsunit/debug-references.js
@@ -1,118 +1,118 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-listenerComplete = false;
-exception = false;
-
-// The base part of all evaluate requests.
-var base_request = '"seq":0,"type":"request","command":"references"'
-
-function safeEval(code) {
- try {
- return eval('(' + code + ')');
- } catch (e) {
- assertEquals(void 0, e);
- return undefined;
- }
-}
-
-function testRequest(dcp, arguments, success, count) {
- // Generate request with the supplied arguments.
- var request;
- if (arguments) {
- request = '{' + base_request + ',"arguments":' + arguments + '}';
- } else {
- request = '{' + base_request + '}'
- }
-
- // Process the request and check expectation.
- var response = safeEval(dcp.processDebugJSONRequest(request));
- if (success) {
- assertTrue(response.success, request + ' -> ' + response.message);
- assertTrue(response.body instanceof Array);
- if (count) {
- assertEquals(count, response.body.length);
- } else {
- assertTrue(response.body.length > 0);
- }
- } else {
- assertFalse(response.success, request + ' -> ' + response.message);
- }
- assertEquals(response.running, dcp.isRunning(), request + ' -> expected not running');
-}
-
-function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break) {
- // Get the debug command processor.
- var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
-
- // Test some illegal references requests.
- testRequest(dcp, void 0, false);
- testRequest(dcp, '{"handle":"a"}', false);
- testRequest(dcp, '{"handle":1}', false);
- testRequest(dcp, '{"type":"referencedBy"}', false);
- testRequest(dcp, '{"type":"constructedBy"}', false);
-
- // Evaluate Point.
- var evaluate_point = '{"seq":0,"type":"request","command":"evaluate",' +
- '"arguments":{"expression":"Point"}}';
- var response = safeEval(dcp.processDebugJSONRequest(evaluate_point));
- assertTrue(response.success, "Evaluation of Point failed");
- var handle = response.body.handle;
-
- // Test some legal references requests.
- testRequest(dcp, '{"handle":' + handle + ',"type":"referencedBy"}', true);
- testRequest(dcp, '{"handle":' + handle + ',"type":"constructedBy"}',
- true, 2);
-
- // Indicate that all was processed.
- listenerComplete = true;
- }
- } catch (e) {
- exception = e
- };
-};
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-// Test constructor and objects.
-function Point(x, y) { this.x_ = x; this.y_ = y;}
-p = new Point(0,0);
-q = new Point(1,2);
-
-// Enter debugger causing the event listener to be called.
-debugger;
-
-// Make sure that the debug event listener was invoked.
-assertFalse(exception, "exception in listener")
-assertTrue(listenerComplete, "listener did not run to completion");
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+listenerComplete = false;
+exception = false;
+
+// The base part of all evaluate requests.
+var base_request = '"seq":0,"type":"request","command":"references"'
+
+function safeEval(code) {
+ try {
+ return eval('(' + code + ')');
+ } catch (e) {
+ assertEquals(void 0, e);
+ return undefined;
+ }
+}
+
+function testRequest(dcp, arguments, success, count) {
+ // Generate request with the supplied arguments.
+ var request;
+ if (arguments) {
+ request = '{' + base_request + ',"arguments":' + arguments + '}';
+ } else {
+ request = '{' + base_request + '}'
+ }
+
+ // Process the request and check expectation.
+ var response = safeEval(dcp.processDebugJSONRequest(request));
+ if (success) {
+ assertTrue(response.success, request + ' -> ' + response.message);
+ assertTrue(response.body instanceof Array);
+ if (count) {
+ assertEquals(count, response.body.length);
+ } else {
+ assertTrue(response.body.length > 0);
+ }
+ } else {
+ assertFalse(response.success, request + ' -> ' + response.message);
+ }
+ assertEquals(response.running, dcp.isRunning(), request + ' -> expected not running');
+}
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ // Get the debug command processor.
+ var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
+ // Test some illegal references requests.
+ testRequest(dcp, void 0, false);
+ testRequest(dcp, '{"handle":"a"}', false);
+ testRequest(dcp, '{"handle":1}', false);
+ testRequest(dcp, '{"type":"referencedBy"}', false);
+ testRequest(dcp, '{"type":"constructedBy"}', false);
+
+ // Evaluate Point.
+ var evaluate_point = '{"seq":0,"type":"request","command":"evaluate",' +
+ '"arguments":{"expression":"Point"}}';
+ var response = safeEval(dcp.processDebugJSONRequest(evaluate_point));
+ assertTrue(response.success, "Evaluation of Point failed");
+ var handle = response.body.handle;
+
+ // Test some legal references requests.
+ testRequest(dcp, '{"handle":' + handle + ',"type":"referencedBy"}', true);
+ testRequest(dcp, '{"handle":' + handle + ',"type":"constructedBy"}',
+ true, 2);
+
+ // Indicate that all was processed.
+ listenerComplete = true;
+ }
+ } catch (e) {
+ exception = e
+ };
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+// Test constructor and objects.
+function Point(x, y) { this.x_ = x; this.y_ = y;}
+p = new Point(0,0);
+q = new Point(1,2);
+
+// Enter debugger causing the event listener to be called.
+debugger;
+
+// Make sure that the debug event listener was invoked.
+assertFalse(exception, "exception in listener")
+assertTrue(listenerComplete, "listener did not run to completion");
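For reference, a legal references request as exercised above has this shape (a sketch; 'handle' comes from a prior evaluate response, as in the test):

    // type 'referencedBy' lists objects holding a reference to the handle's
    // target; 'constructedBy' lists objects constructed by it (here p and q,
    // hence the expected count of 2).
    var request = JSON.stringify({
      seq: 0,
      type: 'request',
      command: 'references',
      arguments: { handle: handle, type: 'constructedBy' }
    });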
diff --git a/deps/v8/test/mjsunit/debug-stepin-accessor.js b/deps/v8/test/mjsunit/debug-stepin-accessor.js
index 2e593b286..2c9c8c324 100644
--- a/deps/v8/test/mjsunit/debug-stepin-accessor.js
+++ b/deps/v8/test/mjsunit/debug-stepin-accessor.js
@@ -1,248 +1,248 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-var exception = null;
-var state = 1;
-var expected_source_line_text = null;
-var expected_function_name = null;
-
-// Simple debug event handler which first time will cause 'step in' action
-// to get into g.call and than check that execution is stopped inside
-// function 'g'.
-function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break) {
- if (state == 1) {
- exec_state.prepareStep(Debug.StepAction.StepIn, 2);
- state = 2;
- } else if (state == 2) {
- assertEquals(expected_source_line_text,
- event_data.sourceLineText());
- assertEquals(expected_function_name, event_data.func().name());
- state = 3;
- }
- }
- } catch(e) {
- exception = e;
- }
-};
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-
-var c = {
- name: 'name ',
- get getter1() {
- return this.name; // getter 1
- },
- get getter2() {
- return { // getter 2
- 'a': c.name
- };
- },
- set setter1(n) {
- this.name = n; // setter 1
- }
-};
-
-c.__defineGetter__('y', function getterY() {
- return this.name; // getter y
-});
-
-c.__defineGetter__(3, function getter3() {
- return this.name; // getter 3
-});
-
-c.__defineSetter__('y', function setterY(n) {
- this.name = n; // setter y
-});
-
-c.__defineSetter__(3, function setter3(n) {
- this.name = n; // setter 3
-});
-
-var d = {
- 'c': c,
-};
-
-function testGetter1_1() {
- expected_function_name = 'getter1';
- expected_source_line_text = ' return this.name; // getter 1';
- debugger;
- var x = c.getter1;
-}
-
-function testGetter1_2() {
- expected_function_name = 'getter1';
- expected_source_line_text = ' return this.name; // getter 1';
- debugger;
- var x = c['getter1'];
-}
-
-function testGetter1_3() {
- expected_function_name = 'getter1';
- expected_source_line_text = ' return this.name; // getter 1';
- debugger;
- for (var i = 1; i < 2; i++) {
- var x = c['getter' + i];
- }
-}
-
-function testGetter1_4() {
- expected_function_name = 'getter1';
- expected_source_line_text = ' return this.name; // getter 1';
- debugger;
- var x = d.c.getter1;
-}
-
-function testGetter1_5() {
- expected_function_name = 'getter1';
- expected_source_line_text = ' return this.name; // getter 1';
- for (var i = 2; i != 1; i--);
- debugger;
- var x = d.c['getter' + i];
-}
-
-function testGetter2_1() {
- expected_function_name = 'getter2';
- expected_source_line_text = ' return { // getter 2';
- for (var i = 2; i != 1; i--);
- debugger;
- var t = d.c.getter2.name;
-}
-
-
-function testGetterY_1() {
- expected_function_name = 'getterY';
- expected_source_line_text = ' return this.name; // getter y';
- debugger;
- var t = d.c.y;
-}
-
-function testIndexedGetter3_1() {
- expected_function_name = 'getter3';
- expected_source_line_text = ' return this.name; // getter 3';
- debugger;
- var r = d.c[3];
-}
-
-function testSetterY_1() {
- expected_function_name = 'setterY';
- expected_source_line_text = ' this.name = n; // setter y';
- debugger;
- d.c.y = 'www';
-}
-
-function testIndexedSetter3_1() {
- expected_function_name = 'setter3';
- expected_source_line_text = ' this.name = n; // setter 3';
- var i = 3
- debugger;
- d.c[3] = 'www';
-}
-
-function testSetter1_1() {
- expected_function_name = 'setter1';
- expected_source_line_text = ' this.name = n; // setter 1';
- debugger;
- d.c.setter1 = 'aa';
-}
-
-function testSetter1_2() {
- expected_function_name = 'setter1';
- expected_source_line_text = ' this.name = n; // setter 1';
- debugger;
- d.c['setter1'] = 'bb';
-}
-
-function testSetter1_3() {
- expected_function_name = 'setter1';
- expected_source_line_text = ' this.name = n; // setter 1';
- for (var i = 2; i != 1; i--);
- debugger;
- d.c['setter' + i] = i;
-}
-
-var e = {
- name: 'e'
-};
-e.__proto__ = c;
-
-function testProtoGetter1_1() {
- expected_function_name = 'getter1';
- expected_source_line_text = ' return this.name; // getter 1';
- debugger;
- var x = e.getter1;
-}
-
-function testProtoSetter1_1() {
- expected_function_name = 'setter1';
- expected_source_line_text = ' this.name = n; // setter 1';
- debugger;
- e.setter1 = 'aa';
-}
-
-function testProtoIndexedGetter3_1() {
- expected_function_name = 'getter3';
- expected_source_line_text = ' return this.name; // getter 3';
- debugger;
- var x = e[3];
-}
-
-function testProtoIndexedSetter3_1() {
- expected_function_name = 'setter3';
- expected_source_line_text = ' this.name = n; // setter 3';
- debugger;
- e[3] = 'new val';
-}
-
-function testProtoSetter1_2() {
- expected_function_name = 'setter1';
- expected_source_line_text = ' this.name = n; // setter 1';
- for (var i = 2; i != 1; i--);
- debugger;
- e['setter' + i] = 'aa';
-}
-
-for (var n in this) {
- if (n.substr(0, 4) != 'test') {
- continue;
- }
- state = 1;
- this[n]();
- assertNull(exception);
- assertEquals(3, state);
-}
-
-// Get rid of the debug event listener.
-Debug.setListener(null);
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null;
+var state = 1;
+var expected_source_line_text = null;
+var expected_function_name = null;
+
+// Simple debug event handler which on the first break performs a 'step in'
+// action and then checks that execution stops inside the expected accessor
+// function.
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ if (state == 1) {
+ exec_state.prepareStep(Debug.StepAction.StepIn, 2);
+ state = 2;
+ } else if (state == 2) {
+ assertEquals(expected_source_line_text,
+ event_data.sourceLineText());
+ assertEquals(expected_function_name, event_data.func().name());
+ state = 3;
+ }
+ }
+ } catch(e) {
+ exception = e;
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+
+var c = {
+ name: 'name ',
+ get getter1() {
+ return this.name; // getter 1
+ },
+ get getter2() {
+ return { // getter 2
+ 'a': c.name
+ };
+ },
+ set setter1(n) {
+ this.name = n; // setter 1
+ }
+};
+
+c.__defineGetter__('y', function getterY() {
+ return this.name; // getter y
+});
+
+c.__defineGetter__(3, function getter3() {
+ return this.name; // getter 3
+});
+
+c.__defineSetter__('y', function setterY(n) {
+ this.name = n; // setter y
+});
+
+c.__defineSetter__(3, function setter3(n) {
+ this.name = n; // setter 3
+});
+
+var d = {
+ 'c': c,
+};
+
+function testGetter1_1() {
+ expected_function_name = 'getter1';
+ expected_source_line_text = ' return this.name; // getter 1';
+ debugger;
+ var x = c.getter1;
+}
+
+function testGetter1_2() {
+ expected_function_name = 'getter1';
+ expected_source_line_text = ' return this.name; // getter 1';
+ debugger;
+ var x = c['getter1'];
+}
+
+function testGetter1_3() {
+ expected_function_name = 'getter1';
+ expected_source_line_text = ' return this.name; // getter 1';
+ debugger;
+ for (var i = 1; i < 2; i++) {
+ var x = c['getter' + i];
+ }
+}
+
+function testGetter1_4() {
+ expected_function_name = 'getter1';
+ expected_source_line_text = ' return this.name; // getter 1';
+ debugger;
+ var x = d.c.getter1;
+}
+
+function testGetter1_5() {
+ expected_function_name = 'getter1';
+ expected_source_line_text = ' return this.name; // getter 1';
+ for (var i = 2; i != 1; i--);
+ debugger;
+ var x = d.c['getter' + i];
+}
+
+function testGetter2_1() {
+ expected_function_name = 'getter2';
+ expected_source_line_text = ' return { // getter 2';
+ for (var i = 2; i != 1; i--);
+ debugger;
+ var t = d.c.getter2.name;
+}
+
+
+function testGetterY_1() {
+ expected_function_name = 'getterY';
+ expected_source_line_text = ' return this.name; // getter y';
+ debugger;
+ var t = d.c.y;
+}
+
+function testIndexedGetter3_1() {
+ expected_function_name = 'getter3';
+ expected_source_line_text = ' return this.name; // getter 3';
+ debugger;
+ var r = d.c[3];
+}
+
+function testSetterY_1() {
+ expected_function_name = 'setterY';
+ expected_source_line_text = ' this.name = n; // setter y';
+ debugger;
+ d.c.y = 'www';
+}
+
+function testIndexedSetter3_1() {
+ expected_function_name = 'setter3';
+ expected_source_line_text = ' this.name = n; // setter 3';
+  var i = 3;
+ debugger;
+ d.c[3] = 'www';
+}
+
+function testSetter1_1() {
+ expected_function_name = 'setter1';
+ expected_source_line_text = ' this.name = n; // setter 1';
+ debugger;
+ d.c.setter1 = 'aa';
+}
+
+function testSetter1_2() {
+ expected_function_name = 'setter1';
+ expected_source_line_text = ' this.name = n; // setter 1';
+ debugger;
+ d.c['setter1'] = 'bb';
+}
+
+function testSetter1_3() {
+ expected_function_name = 'setter1';
+ expected_source_line_text = ' this.name = n; // setter 1';
+ for (var i = 2; i != 1; i--);
+ debugger;
+ d.c['setter' + i] = i;
+}
+
+var e = {
+ name: 'e'
+};
+e.__proto__ = c;
+
+function testProtoGetter1_1() {
+ expected_function_name = 'getter1';
+ expected_source_line_text = ' return this.name; // getter 1';
+ debugger;
+ var x = e.getter1;
+}
+
+function testProtoSetter1_1() {
+ expected_function_name = 'setter1';
+ expected_source_line_text = ' this.name = n; // setter 1';
+ debugger;
+ e.setter1 = 'aa';
+}
+
+function testProtoIndexedGetter3_1() {
+ expected_function_name = 'getter3';
+ expected_source_line_text = ' return this.name; // getter 3';
+ debugger;
+ var x = e[3];
+}
+
+function testProtoIndexedSetter3_1() {
+ expected_function_name = 'setter3';
+ expected_source_line_text = ' this.name = n; // setter 3';
+ debugger;
+ e[3] = 'new val';
+}
+
+function testProtoSetter1_2() {
+ expected_function_name = 'setter1';
+ expected_source_line_text = ' this.name = n; // setter 1';
+ for (var i = 2; i != 1; i--);
+ debugger;
+ e['setter' + i] = 'aa';
+}
+
+for (var n in this) {
+ if (n.substr(0, 4) != 'test') {
+ continue;
+ }
+ state = 1;
+ this[n]();
+ assertNull(exception);
+ assertEquals(3, state);
+}
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
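A side note on the accessor plumbing above: __defineGetter__ and __defineSetter__ are the legacy, non-standard spellings; the equivalent standard form (shown only for reference, the test deliberately uses the legacy API) is:

    Object.defineProperty(c, 'y', {
      get: function getterY() { return this.name; },  // getter y
      set: function setterY(n) { this.name = n; },    // setter y
      configurable: true
    });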
diff --git a/deps/v8/test/mjsunit/debug-stepin-builtin.js b/deps/v8/test/mjsunit/debug-stepin-builtin.js
index c6a97eac0..d9c606110 100644
--- a/deps/v8/test/mjsunit/debug-stepin-builtin.js
+++ b/deps/v8/test/mjsunit/debug-stepin-builtin.js
@@ -1,78 +1,78 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-var exception = null;
-var state = 1;
-var expected_source_line_text = null;
-var expected_function_name = null;
-
-// Simple debug event handler which first time will cause 'step in' action
-// and than check that execution is paused inside function
-// expected_function_name.
-function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break) {
- if (state == 1) {
- exec_state.prepareStep(Debug.StepAction.StepIn, 2);
- state = 2;
- } else if (state == 2) {
- assertEquals(expected_function_name, event_data.func().name());
- assertEquals(expected_source_line_text,
- event_data.sourceLineText());
- state = 3;
- }
- }
- } catch(e) {
- exception = e;
- }
-};
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-var a = [1,2,3,4,5];
-
-// Test step into function call from a function without local variables.
-function testStepInArraySlice() {
- expected_function_name = 'testStepInArraySlice';
- expected_source_line_text = '} // expected line';
- debugger;
- var s = Array.prototype.slice.call(a, 2,3);
-} // expected line
-
-state = 1;
-testStepInArraySlice();
-assertNull(exception);
-assertEquals(3, state);
-
-// Get rid of the debug event listener.
-Debug.setListener(null);
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null;
+var state = 1;
+var expected_source_line_text = null;
+var expected_function_name = null;
+
+// Simple debug event handler which on the first break performs a 'step in'
+// action and then checks that execution is paused inside the function named
+// by expected_function_name.
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ if (state == 1) {
+ exec_state.prepareStep(Debug.StepAction.StepIn, 2);
+ state = 2;
+ } else if (state == 2) {
+ assertEquals(expected_function_name, event_data.func().name());
+ assertEquals(expected_source_line_text,
+ event_data.sourceLineText());
+ state = 3;
+ }
+ }
+ } catch(e) {
+ exception = e;
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+var a = [1,2,3,4,5];
+
+// Test step into function call from a function without local variables.
+function testStepInArraySlice() {
+ expected_function_name = 'testStepInArraySlice';
+ expected_source_line_text = '} // expected line';
+ debugger;
+ var s = Array.prototype.slice.call(a, 2,3);
+} // expected line
+
+state = 1;
+testStepInArraySlice();
+assertNull(exception);
+assertEquals(3, state);
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
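For clarity, the builtin call the test steps into behaves as follows (no debugger involved):

    var a = [1, 2, 3, 4, 5];
    // slice(2, 3) copies the half-open index range [2, 3) into a new array.
    var s = Array.prototype.slice.call(a, 2, 3);
    console.log(s);  // [3]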
diff --git a/deps/v8/test/mjsunit/debug-stepin-call-function-stub.js b/deps/v8/test/mjsunit/debug-stepin-call-function-stub.js
index 12f51429b..c5cf8fdf3 100644
--- a/deps/v8/test/mjsunit/debug-stepin-call-function-stub.js
+++ b/deps/v8/test/mjsunit/debug-stepin-call-function-stub.js
@@ -1,115 +1,115 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-var exception = null;
-var state = 0;
-var expected_function_name = null;
-var expected_source_line_text = null;
-var expected_caller_source_line = null;
-var step_in_count = 2;
-
-// Simple debug event handler which first time will cause 'step in' action
-// to get into g.call and than check that execution is pauesed inside
-// function 'g'.
-function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break) {
- if (state == 0) {
- // Step into f().
- exec_state.prepareStep(Debug.StepAction.StepIn, step_in_count);
- state = 2;
- } else if (state == 2) {
- assertEquals(expected_source_line_text,
- event_data.sourceLineText());
- assertEquals(expected_function_name, event_data.func().name());
- state = 3;
- }
- }
- } catch(e) {
- exception = e;
- }
-};
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-
-function g() {
- return "s"; // expected line
-}
-
-function testFunction() {
- var f = g;
- var s = 1 +f(10);
-}
-
-function g2() {
- return "s2"; // expected line
-}
-
-function testFunction2() {
- var f = g2;
- var s = 1 +f(10, 20);
-}
-
-// Run three times. First time the function will be compiled lazily,
-// second time cached version will be used.
-for (var i = 0; i < 3; i++) {
- state = 0;
- expected_function_name = 'g';
- expected_source_line_text = ' return "s"; // expected line';
- step_in_count = 2;
- // Set a break point and call to invoke the debug event listener.
- Debug.setBreakPoint(testFunction, 1, 0);
- testFunction();
- assertNull(exception);
- assertEquals(3, state);
-}
-
-// Test stepping into function call when a breakpoint is set at the place
-// of call. Use different pair of functions so that g2 is compiled lazily.
-// Run twice: first time function will be compiled lazily, second time
-// cached version will be used.
-for (var i = 0; i < 3; i++) {
- state = 0;
- expected_function_name = 'g2';
- expected_source_line_text = ' return "s2"; // expected line';
- step_in_count = 1;
- // Set a break point and call to invoke the debug event listener.
- Debug.setBreakPoint(testFunction2, 2, 0);
- testFunction2();
- assertNull(exception);
- assertEquals(3, state);
-}
-
-
-// Get rid of the debug event listener.
-Debug.setListener(null);
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null;
+var state = 0;
+var expected_function_name = null;
+var expected_source_line_text = null;
+var expected_caller_source_line = null;
+var step_in_count = 2;
+
+// Simple debug event handler which on the first break performs a 'step in'
+// action to get into g, and then checks that execution is paused inside
+// function 'g'.
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ if (state == 0) {
+ // Step into f().
+ exec_state.prepareStep(Debug.StepAction.StepIn, step_in_count);
+ state = 2;
+ } else if (state == 2) {
+ assertEquals(expected_source_line_text,
+ event_data.sourceLineText());
+ assertEquals(expected_function_name, event_data.func().name());
+ state = 3;
+ }
+ }
+ } catch(e) {
+ exception = e;
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+
+function g() {
+ return "s"; // expected line
+}
+
+function testFunction() {
+ var f = g;
+ var s = 1 +f(10);
+}
+
+function g2() {
+ return "s2"; // expected line
+}
+
+function testFunction2() {
+ var f = g2;
+  var s = 1 + f(10, 20);
+}
+
+// Run three times. The first time the function will be compiled lazily;
+// on subsequent runs the cached version will be used.
+for (var i = 0; i < 3; i++) {
+ state = 0;
+ expected_function_name = 'g';
+ expected_source_line_text = ' return "s"; // expected line';
+ step_in_count = 2;
+ // Set a break point and call to invoke the debug event listener.
+ Debug.setBreakPoint(testFunction, 1, 0);
+ testFunction();
+ assertNull(exception);
+ assertEquals(3, state);
+}
+
+// Test stepping into a function call when a breakpoint is set at the place
+// of the call. Use a different pair of functions so that g2 is compiled
+// lazily. Run three times: the first time the function will be compiled
+// lazily; on subsequent runs the cached version will be used.
+for (var i = 0; i < 3; i++) {
+ state = 0;
+ expected_function_name = 'g2';
+ expected_source_line_text = ' return "s2"; // expected line';
+ step_in_count = 1;
+ // Set a break point and call to invoke the debug event listener.
+ Debug.setBreakPoint(testFunction2, 2, 0);
+ testFunction2();
+ assertNull(exception);
+ assertEquals(3, state);
+}
+
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
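
A minimal sketch of the step-in pattern these tests share (assuming the
--expose-debug-as debug API and the mjsunit assert helpers used throughout
this suite; callee/caller are illustrative names, and the step count needed
depends on the exact source position, as the expected_source_line_text
checks above demonstrate):

    Debug = debug.Debug;
    var state = 0;
    function listener(event, exec_state, event_data, data) {
      if (event != Debug.DebugEvent.Break) return;
      if (state == 0) {
        // First break: schedule two 'step in' actions before resuming.
        exec_state.prepareStep(Debug.StepAction.StepIn, 2);
        state = 1;
      } else if (state == 1) {
        // Second break: execution should now be paused inside callee().
        assertEquals('callee', event_data.func().name());
        state = 2;
      }
    }
    Debug.setListener(listener);
    function callee() {
      return 1;
    }
    function caller() {
      debugger;   // first break
      callee();   // the StepIn above lands on the first line of callee()
    }
    caller();
    assertEquals(2, state);
    Debug.setListener(null);
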
diff --git a/deps/v8/test/mjsunit/debug-stepin-function-call.js b/deps/v8/test/mjsunit/debug-stepin-function-call.js
index 9f24c017c..385fcb2f8 100644
--- a/deps/v8/test/mjsunit/debug-stepin-function-call.js
+++ b/deps/v8/test/mjsunit/debug-stepin-function-call.js
@@ -1,149 +1,149 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-var exception = null;
-var state = 0;
-
-// Simple debug event handler which first time will cause 'step in' action
-// to get into g.call and than check that execution is pauesed inside
-// function 'g'.
-function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break) {
- if (state == 0) {
- // Step into f2.call:
- exec_state.prepareStep(Debug.StepAction.StepIn, 2);
- state = 2;
- } else if (state == 2) {
- assertEquals('g', event_data.func().name());
- assertEquals(' return t + 1; // expected line',
- event_data.sourceLineText());
- state = 3;
- }
- }
- } catch(e) {
- exception = e;
- }
-};
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-
-// Sample functions.
-function g(t) {
- return t + 1; // expected line
-}
-
-// Test step into function call from a function without local variables.
-function call1() {
- debugger;
- g.call(null, 3);
-}
-
-
-// Test step into function call from a function with some local variables.
-function call2() {
- var aLocalVar = 'test';
- var anotherLocalVar = g(aLocalVar) + 's';
- var yetAnotherLocal = 10;
- debugger;
- g.call(null, 3);
-}
-
-// Test step into function call which is a part of an expression.
-function call3() {
- var alias = g;
- debugger;
- var r = 10 + alias.call(null, 3);
- var aLocalVar = 'test';
- var anotherLocalVar = g(aLocalVar) + 's';
- var yetAnotherLocal = 10;
-}
-
-// Test step into function call from a function with some local variables.
-function call4() {
- var alias = g;
- debugger;
- alias.call(null, 3);
- var aLocalVar = 'test';
- var anotherLocalVar = g(aLocalVar) + 's';
- var yetAnotherLocal = 10;
-}
-
-// Test step into function apply from a function without local variables.
-function apply1() {
- debugger;
- g.apply(null, [3]);
-}
-
-
-// Test step into function apply from a function with some local variables.
-function apply2() {
- var aLocalVar = 'test';
- var anotherLocalVar = g(aLocalVar) + 's';
- var yetAnotherLocal = 10;
- debugger;
- g.apply(null, [3, 4]);
-}
-
-// Test step into function apply which is a part of an expression.
-function apply3() {
- var alias = g;
- debugger;
- var r = 10 + alias.apply(null, [3, 'unused arg']);
- var aLocalVar = 'test';
- var anotherLocalVar = g(aLocalVar) + 's';
- var yetAnotherLocal = 10;
-}
-
-// Test step into function apply from a function with some local variables.
-function apply4() {
- var alias = g;
- debugger;
- alias.apply(null, [3]);
- var aLocalVar = 'test';
- var anotherLocalVar = g(aLocalVar) + 's';
- var yetAnotherLocal = 10;
-}
-
-var testFunctions =
- [call1, call2, call3, call4, apply1, apply2, apply3, apply4];
-
-for (var i = 0; i < testFunctions.length; i++) {
- state = 0;
- testFunctions[i]();
- assertNull(exception);
- assertEquals(3, state);
-}
-
-// Get rid of the debug event listener.
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null;
+var state = 0;
+
+// Simple debug event handler which the first time will cause a 'step in'
+// action to get into g.call and then check that execution is paused inside
+// function 'g'.
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ if (state == 0) {
+ // Step into f2.call:
+ exec_state.prepareStep(Debug.StepAction.StepIn, 2);
+ state = 2;
+ } else if (state == 2) {
+ assertEquals('g', event_data.func().name());
+ assertEquals(' return t + 1; // expected line',
+ event_data.sourceLineText());
+ state = 3;
+ }
+ }
+ } catch(e) {
+ exception = e;
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+
+// Sample functions.
+function g(t) {
+ return t + 1; // expected line
+}
+
+// Test step into function call from a function without local variables.
+function call1() {
+ debugger;
+ g.call(null, 3);
+}
+
+
+// Test step into function call from a function with some local variables.
+function call2() {
+ var aLocalVar = 'test';
+ var anotherLocalVar = g(aLocalVar) + 's';
+ var yetAnotherLocal = 10;
+ debugger;
+ g.call(null, 3);
+}
+
+// Test step into function call which is a part of an expression.
+function call3() {
+ var alias = g;
+ debugger;
+ var r = 10 + alias.call(null, 3);
+ var aLocalVar = 'test';
+ var anotherLocalVar = g(aLocalVar) + 's';
+ var yetAnotherLocal = 10;
+}
+
+// Test step into function call from a function with some local variables.
+function call4() {
+ var alias = g;
+ debugger;
+ alias.call(null, 3);
+ var aLocalVar = 'test';
+ var anotherLocalVar = g(aLocalVar) + 's';
+ var yetAnotherLocal = 10;
+}
+
+// Test step into function apply from a function without local variables.
+function apply1() {
+ debugger;
+ g.apply(null, [3]);
+}
+
+
+// Test step into function apply from a function with some local variables.
+function apply2() {
+ var aLocalVar = 'test';
+ var anotherLocalVar = g(aLocalVar) + 's';
+ var yetAnotherLocal = 10;
+ debugger;
+ g.apply(null, [3, 4]);
+}
+
+// Test step into function apply which is a part of an expression.
+function apply3() {
+ var alias = g;
+ debugger;
+ var r = 10 + alias.apply(null, [3, 'unused arg']);
+ var aLocalVar = 'test';
+ var anotherLocalVar = g(aLocalVar) + 's';
+ var yetAnotherLocal = 10;
+}
+
+// Test step into function apply from a function with some local variables.
+function apply4() {
+ var alias = g;
+ debugger;
+ alias.apply(null, [3]);
+ var aLocalVar = 'test';
+ var anotherLocalVar = g(aLocalVar) + 's';
+ var yetAnotherLocal = 10;
+}
+
+var testFunctions =
+ [call1, call2, call3, call4, apply1, apply2, apply3, apply4];
+
+for (var i = 0; i < testFunctions.length; i++) {
+ state = 0;
+ testFunctions[i]();
+ assertNull(exception);
+ assertEquals(3, state);
+}
+
+// Get rid of the debug event listener.
Debug.setListener(null); \ No newline at end of file
diff --git a/deps/v8/test/mjsunit/debug-stepnext-do-while.js b/deps/v8/test/mjsunit/debug-stepnext-do-while.js
index 17058a7b6..bbb18bc43 100644
--- a/deps/v8/test/mjsunit/debug-stepnext-do-while.js
+++ b/deps/v8/test/mjsunit/debug-stepnext-do-while.js
@@ -1,79 +1,79 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-var exception = null;
-var break_break_point_hit_count = 0;
-
-function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break) {
- if (break_break_point_hit_count == 0) {
- assertEquals(' debugger;',
- event_data.sourceLineText());
- assertEquals('runDoWhile', event_data.func().name());
- } else if (break_break_point_hit_count == 1) {
- assertEquals(' } while(condition());',
- event_data.sourceLineText());
- assertEquals('runDoWhile', event_data.func().name());
- }
-
- break_break_point_hit_count++;
- // Continue stepping until returned to bottom frame.
- if (exec_state.frameCount() > 1) {
- exec_state.prepareStep(Debug.StepAction.StepNext);
- }
-
- }
- } catch(e) {
- exception = e;
- }
-};
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-function condition() {
- return false;
-}
-
-function runDoWhile() {
- do {
- debugger;
- } while(condition());
-};
-
-break_break_point_hit_count = 0;
-runDoWhile();
-assertNull(exception);
-assertEquals(4, break_break_point_hit_count);
-
-// Get rid of the debug event listener.
-Debug.setListener(null);
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null;
+var break_break_point_hit_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ if (break_break_point_hit_count == 0) {
+ assertEquals(' debugger;',
+ event_data.sourceLineText());
+ assertEquals('runDoWhile', event_data.func().name());
+ } else if (break_break_point_hit_count == 1) {
+ assertEquals(' } while(condition());',
+ event_data.sourceLineText());
+ assertEquals('runDoWhile', event_data.func().name());
+ }
+
+ break_break_point_hit_count++;
+ // Continue stepping until returned to bottom frame.
+ if (exec_state.frameCount() > 1) {
+ exec_state.prepareStep(Debug.StepAction.StepNext);
+ }
+
+ }
+ } catch(e) {
+ exception = e;
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function condition() {
+ return false;
+}
+
+function runDoWhile() {
+ do {
+ debugger;
+ } while(condition());
+};
+
+break_break_point_hit_count = 0;
+runDoWhile();
+assertNull(exception);
+assertEquals(4, break_break_point_hit_count);
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
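
The frameCount() guard in the listener above is how these tests stop an
otherwise unbounded step loop; a minimal sketch of the idea (same debug API
and assert helpers assumed):

    Debug = debug.Debug;
    var hits = 0;
    function listener(event, exec_state, event_data, data) {
      if (event != Debug.DebugEvent.Break) return;
      hits++;
      // Keep stepping only while frames remain above the bottom frame; once
      // the stepped-over function has returned, no further step is scheduled.
      if (exec_state.frameCount() > 1) {
        exec_state.prepareStep(Debug.StepAction.StepNext);
      }
    }
    Debug.setListener(listener);
    (function() { debugger; var a = 1; var b = 2; })();
    Debug.setListener(null);
    assertTrue(hits > 1);
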
diff --git a/deps/v8/test/mjsunit/debug-stepout-recursive-function.js b/deps/v8/test/mjsunit/debug-stepout-recursive-function.js
index 2f8780c95..475fe2659 100644
--- a/deps/v8/test/mjsunit/debug-stepout-recursive-function.js
+++ b/deps/v8/test/mjsunit/debug-stepout-recursive-function.js
@@ -1,106 +1,106 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-var exception = null;
-var step_out_count = 1;
-
-// Simple debug event handler which counts the number of breaks hit and steps.
-var break_point_hit_count = 0;
-function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break) {
- break_point_hit_count++;
- // Continue stepping until returned to bottom frame.
- if (exec_state.frameCount() > 1) {
- exec_state.prepareStep(Debug.StepAction.StepOut, step_out_count);
- }
-
- }
- } catch(e) {
- exception = e;
- }
-
-};
-
-function BeginTest(name) {
- test_name = name;
- break_point_hit_count = 0;
- exception = null;
-}
-
-function EndTest(expected_break_point_hit_count) {
- assertEquals(expected_break_point_hit_count, break_point_hit_count, test_name);
- assertNull(exception, test_name);
- test_name = null;
-}
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-
-var shouldBreak = null;
-function fact(x) {
- if (shouldBreak(x)) {
- debugger;
- }
- if (x < 2) {
- return 1;
- } else {
- return x*fact(x-1);
- }
-}
-
-BeginTest('Test 1');
-shouldBreak = function(x) { return x == 3; };
-step_out_count = 1;
-fact(3);
-EndTest(2);
-
-BeginTest('Test 2');
-shouldBreak = function(x) { return x == 2; };
-step_out_count = 1;
-fact(3);
-EndTest(3);
-
-BeginTest('Test 3');
-shouldBreak = function(x) { return x == 1; };
-step_out_count = 2;
-fact(3);
-EndTest(2);
-
-BeginTest('Test 4');
-shouldBreak = function(x) { print(x); return x == 1 || x == 3; };
-step_out_count = 2;
-fact(3);
-EndTest(3);
-
-// Get rid of the debug event listener.
-Debug.setListener(null);
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null;
+var step_out_count = 1;
+
+// Simple debug event handler which counts the number of breaks hit and
+// keeps stepping until back in the bottom frame.
+var break_point_hit_count = 0;
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ break_point_hit_count++;
+ // Continue stepping until returned to bottom frame.
+ if (exec_state.frameCount() > 1) {
+ exec_state.prepareStep(Debug.StepAction.StepOut, step_out_count);
+ }
+
+ }
+ } catch(e) {
+ exception = e;
+ }
+
+};
+
+function BeginTest(name) {
+ test_name = name;
+ break_point_hit_count = 0;
+ exception = null;
+}
+
+function EndTest(expected_break_point_hit_count) {
+ assertEquals(expected_break_point_hit_count, break_point_hit_count, test_name);
+ assertNull(exception, test_name);
+ test_name = null;
+}
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+
+var shouldBreak = null;
+function fact(x) {
+ if (shouldBreak(x)) {
+ debugger;
+ }
+ if (x < 2) {
+ return 1;
+ } else {
+ return x*fact(x-1);
+ }
+}
+
+BeginTest('Test 1');
+shouldBreak = function(x) { return x == 3; };
+step_out_count = 1;
+fact(3);
+EndTest(2);
+
+BeginTest('Test 2');
+shouldBreak = function(x) { return x == 2; };
+step_out_count = 1;
+fact(3);
+EndTest(3);
+
+BeginTest('Test 3');
+shouldBreak = function(x) { return x == 1; };
+step_out_count = 2;
+fact(3);
+EndTest(2);
+
+BeginTest('Test 4');
+shouldBreak = function(x) { print(x); return x == 1 || x == 3; };
+step_out_count = 2;
+fact(3);
+EndTest(3);
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
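
Test 3 above is the instructive case: a step-out count of 2 pops two
activation frames of fact before the next break fires, which is why only two
breaks are seen. A sketch of the count semantics (same debug API and assert
helpers assumed; rec is an illustrative name):

    Debug = debug.Debug;
    var breaks = 0;
    function listener(event, exec_state, event_data, data) {
      if (event != Debug.DebugEvent.Break) return;
      breaks++;
      if (exec_state.frameCount() > 1) {
        // Pop two frames per step instead of one.
        exec_state.prepareStep(Debug.StepAction.StepOut, 2);
      }
    }
    Debug.setListener(listener);
    function rec(n) {
      if (n == 0) debugger;  // break in the deepest frame
      else rec(n - 1);
    }
    rec(3);
    Debug.setListener(null);
    // The exact count depends on the recursion depth, as Tests 1-4 show.
    assertTrue(breaks >= 2);
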
diff --git a/deps/v8/test/mjsunit/debug-stepout-to-builtin.js b/deps/v8/test/mjsunit/debug-stepout-to-builtin.js
index 486eee0e4..772fb4b5e 100644
--- a/deps/v8/test/mjsunit/debug-stepout-to-builtin.js
+++ b/deps/v8/test/mjsunit/debug-stepout-to-builtin.js
@@ -1,84 +1,84 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-var exception = null;
-var state = 1;
-var expected_source_line_text = null;
-var expected_function_name = null;
-
-// Simple debug event handler which first time will cause 'step out' action
-// and than check that execution is paused inside function
-// expected_function_name.
-function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break) {
- if (state == 1) {
- exec_state.prepareStep(Debug.StepAction.StepOut, 2);
- state = 2;
- } else if (state == 2) {
- assertEquals(expected_function_name, event_data.func().name());
- assertEquals(expected_source_line_text,
- event_data.sourceLineText());
- state = 3;
- }
- }
- } catch(e) {
- exception = e;
- }
-};
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-var obj = {key:10};
-
-function replacer(key, value) {
- if (key == 'key') {
- debugger;
- }
- return value;
-}
-
-// Test step into function call from a function without local variables.
-function testStepOutToBuiltIn() {
- expected_function_name = 'testStepOutToBuiltIn';
- expected_source_line_text = '} // expected line';
- JSON.stringify(obj, replacer);
-} // expected line
-
-state = 1;
-testStepOutToBuiltIn();
-assertNull(exception);
-assertEquals(3, state);
-
-// Get rid of the debug event listener.
-Debug.setListener(null);
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null;
+var state = 1;
+var expected_source_line_text = null;
+var expected_function_name = null;
+
+// Simple debug event handler which the first time will cause a 'step out'
+// action and then check that execution is paused inside the function named
+// by expected_function_name.
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ if (state == 1) {
+ exec_state.prepareStep(Debug.StepAction.StepOut, 2);
+ state = 2;
+ } else if (state == 2) {
+ assertEquals(expected_function_name, event_data.func().name());
+ assertEquals(expected_source_line_text,
+ event_data.sourceLineText());
+ state = 3;
+ }
+ }
+ } catch(e) {
+ exception = e;
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+var obj = {key:10};
+
+function replacer(key, value) {
+ if (key == 'key') {
+ debugger;
+ }
+ return value;
+}
+
+// Test stepping out from the JSON.stringify replacer callback, across the
+// built-in frame, back into the calling function.
+function testStepOutToBuiltIn() {
+ expected_function_name = 'testStepOutToBuiltIn';
+ expected_source_line_text = '} // expected line';
+ JSON.stringify(obj, replacer);
+} // expected line
+
+state = 1;
+testStepOutToBuiltIn();
+assertNull(exception);
+assertEquals(3, state);
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/for-in-delete.js b/deps/v8/test/mjsunit/for-in-delete.js
new file mode 100644
index 000000000..e9fc060ae
--- /dev/null
+++ b/deps/v8/test/mjsunit/for-in-delete.js
@@ -0,0 +1,50 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that properties deleted during a for-in iteration do not show up
+// later in that iteration.
+
+function f(o, expected, del) {
+ var index = 0;
+ for (p in o) {
+ if (del) delete o[del];
+ assertEquals(expected[index], p);
+ index++;
+ }
+ assertEquals(expected.length, index);
+}
+
+var o = {}
+o.a = 1;
+o.b = 2;
+o.c = 3;
+o.d = 3;
+
+f(o, ['a', 'b', 'c', 'd']);
+f(o, ['a', 'b', 'c', 'd']);
+f(o, ['a', 'c', 'd'], 'b');
+f(o, ['a', 'c'], 'd');
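
The last call above is the subtle case: 'd' is deleted on the first
iteration, before for-in reaches it, so it is never reported ('b' was
already removed by the previous call). The same guarantee restated
standalone (plain JavaScript; only the mjsunit asserts are assumed):

    var obj = { a: 1, b: 2, c: 3 };
    var seen = [];
    for (var p in obj) {
      seen.push(p);
      // Deleting a property that has not been visited yet guarantees that
      // for-in will not report it later in the same iteration.
      delete obj.c;
    }
    assertEquals(['a', 'b'], seen);
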
diff --git a/deps/v8/test/mjsunit/fuzz-natives.js b/deps/v8/test/mjsunit/fuzz-natives.js
index 66841bbd6..11ac2e0be 100644
--- a/deps/v8/test/mjsunit/fuzz-natives.js
+++ b/deps/v8/test/mjsunit/fuzz-natives.js
@@ -174,6 +174,9 @@ var knownProblems = {
// This function performs some checks compile time (it requires its first
// argument to be a compile time smi).
"_GetFromCache": true,
+
+ // This function expects its first argument to be a non-smi.
+ "_IsStringWrapperSafeForDefaultValueOf" : true
};
var currentlyUncallable = {
diff --git a/deps/v8/test/mjsunit/global-deleted-property-keyed.js b/deps/v8/test/mjsunit/global-deleted-property-keyed.js
index e249fd32b..1a1d3cb99 100644
--- a/deps/v8/test/mjsunit/global-deleted-property-keyed.js
+++ b/deps/v8/test/mjsunit/global-deleted-property-keyed.js
@@ -1,38 +1,38 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Flags: --expose-natives_as natives
-// Test keyed access to deleted property in a global object without access checks.
-// Regression test that exposed the_hole value from Runtime_KeyedGetPoperty.
-
-var name = "fisk";
-natives[name] = name;
-function foo() { natives[name] + 12; }
-for(var i = 0; i < 3; i++) foo();
-delete natives[name];
-for(var i = 0; i < 3; i++) foo();
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Flags: --expose-natives_as natives
+// Test keyed access to a deleted property in a global object without
+// access checks. Regression test that exposed the_hole value from
+// Runtime_KeyedGetProperty.
+
+var name = "fisk";
+natives[name] = name;
+function foo() { natives[name] + 12; }
+for(var i = 0; i < 3; i++) foo();
+delete natives[name];
+for(var i = 0; i < 3; i++) foo();
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index ceb5e6203..3c8cbdbfe 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -72,8 +72,4 @@ debug-liveedit-check-stack: SKIP
# Skip all tests on MIPS.
*: SKIP
-[ $arch == x64 ]
-# Stack manipulations in LiveEdit is implemented for ia32 only.
-debug-liveedit-check-stack: SKIP
-
diff --git a/deps/v8/test/mjsunit/object-literal.js b/deps/v8/test/mjsunit/object-literal.js
index 0ad1968e1..397d67064 100644
--- a/deps/v8/test/mjsunit/object-literal.js
+++ b/deps/v8/test/mjsunit/object-literal.js
@@ -146,7 +146,7 @@ function testKeywordProperty(keyword) {
eval("var " + keyword + " = 42;");
assertUnreachable("Not a keyword: " + keyword);
} catch (e) { }
-
+
// Simple property, read and write.
var x = eval("({" + keyword + ": 42})");
assertEquals(42, x[keyword]);
@@ -154,7 +154,7 @@ function testKeywordProperty(keyword) {
eval("x." + keyword + " = 37");
assertEquals(37, x[keyword]);
assertEquals(37, eval("x." + keyword));
-
+
// Getter/setter property, read and write.
var y = eval("({value : 42, get " + keyword + "(){return this.value}," +
" set " + keyword + "(v) { this.value = v; }})");
@@ -163,12 +163,12 @@ function testKeywordProperty(keyword) {
eval("y." + keyword + " = 37");
assertEquals(37, y[keyword]);
assertEquals(37, eval("y." + keyword));
-
+
  // Quoted keyword property works and is read back by unquoted access as well.
var z = eval("({\"" + keyword + "\": 42})");
assertEquals(42, z[keyword]);
assertEquals(42, eval("z." + keyword));
-
+
// Function property, called.
var was_called;
function test_call() { this.was_called = true; was_called = true; }
@@ -187,26 +187,4 @@ function testKeywordProperty(keyword) {
for (var i = 0; i < keywords.length; i++) {
testKeywordProperty(keywords[i]);
-}
-
-// Test getter and setter properties with string/number literal names.
-
-var obj = {get 42() { return 42; },
- get 3.14() { return "PI"; },
- get "PI"() { return 3.14; },
- readback: 0,
- set 37(v) { this.readback = v; },
- set 1.44(v) { this.readback = v; },
- set "Poo"(v) { this.readback = v; }}
-
-assertEquals(42, obj[42]);
-assertEquals("PI", obj[3.14]);
-assertEquals(3.14, obj["PI"]);
-obj[37] = "t1";
-assertEquals("t1", obj.readback);
-obj[1.44] = "t2";
-assertEquals("t2", obj.readback);
-obj["Poo"] = "t3";
-assertEquals("t3", obj.readback);
-
-
+} \ No newline at end of file
diff --git a/deps/v8/test/mjsunit/regexp-capture.js b/deps/v8/test/mjsunit/regexp-capture.js
index d4433d8cd..dc24491d9 100755
--- a/deps/v8/test/mjsunit/regexp-capture.js
+++ b/deps/v8/test/mjsunit/regexp-capture.js
@@ -1,57 +1,57 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Tests from http://blog.stevenlevithan.com/archives/npcg-javascript
-
-assertEquals(true, /(x)?\1y/.test("y"));
-assertEquals(["y", undefined], /(x)?\1y/.exec("y"));
-assertEquals(["y", undefined], /(x)?y/.exec("y"));
-assertEquals(["y", undefined], "y".match(/(x)?\1y/));
-assertEquals(["y", undefined], "y".match(/(x)?y/));
-assertEquals(["y"], "y".match(/(x)?\1y/g));
-assertEquals(["", undefined, ""], "y".split(/(x)?\1y/));
-assertEquals(["", undefined, ""], "y".split(/(x)?y/));
-assertEquals(0, "y".search(/(x)?\1y/));
-assertEquals("z", "y".replace(/(x)?\1y/, "z"));
-assertEquals("", "y".replace(/(x)?y/, "$1"));
-assertEquals("undefined", "y".replace(/(x)?\1y/,
- function($0, $1){
- return String($1);
- }));
-assertEquals("undefined", "y".replace(/(x)?y/,
- function($0, $1){
- return String($1);
- }));
-assertEquals("undefined", "y".replace(/(x)?y/,
- function($0, $1){
- return $1;
- }));
-
-// See https://bugzilla.mozilla.org/show_bug.cgi?id=476146
-assertEquals("bbc,b", /^(b+|a){1,2}?bc/.exec("bbc"));
-assertEquals("bbaa,a,,a", /((\3|b)\2(a)){2,}/.exec("bbaababbabaaaaabbaaaabba"));
-
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests from http://blog.stevenlevithan.com/archives/npcg-javascript
+
+assertEquals(true, /(x)?\1y/.test("y"));
+assertEquals(["y", undefined], /(x)?\1y/.exec("y"));
+assertEquals(["y", undefined], /(x)?y/.exec("y"));
+assertEquals(["y", undefined], "y".match(/(x)?\1y/));
+assertEquals(["y", undefined], "y".match(/(x)?y/));
+assertEquals(["y"], "y".match(/(x)?\1y/g));
+assertEquals(["", undefined, ""], "y".split(/(x)?\1y/));
+assertEquals(["", undefined, ""], "y".split(/(x)?y/));
+assertEquals(0, "y".search(/(x)?\1y/));
+assertEquals("z", "y".replace(/(x)?\1y/, "z"));
+assertEquals("", "y".replace(/(x)?y/, "$1"));
+assertEquals("undefined", "y".replace(/(x)?\1y/,
+ function($0, $1){
+ return String($1);
+ }));
+assertEquals("undefined", "y".replace(/(x)?y/,
+ function($0, $1){
+ return String($1);
+ }));
+assertEquals("undefined", "y".replace(/(x)?y/,
+ function($0, $1){
+ return $1;
+ }));
+
+// See https://bugzilla.mozilla.org/show_bug.cgi?id=476146
+assertEquals("bbc,b", /^(b+|a){1,2}?bc/.exec("bbc"));
+assertEquals("bbaa,a,,a", /((\3|b)\2(a)){2,}/.exec("bbaababbabaaaaabbaaaabba"));
+
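
What these assertions pin down is the treatment of non-participating capture
groups: a group that took no part in the match yields undefined, while a
backreference to it matches the empty string. In reduced form (mjsunit
asserts assumed):

    var m = /(x)?\1y/.exec("y");
    assertEquals("y", m[0]);        // the backreference \1 matched ""
    assertEquals(undefined, m[1]);  // (x) did not participate in the match
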
diff --git a/deps/v8/test/mjsunit/regress/bitops-register-alias.js b/deps/v8/test/mjsunit/regress/bitops-register-alias.js
new file mode 100644
index 000000000..389255df0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/bitops-register-alias.js
@@ -0,0 +1,31 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that the code generator can cope with left and right being in
+// the same register for bitops.
+function f() { for (var i = 10; i < 100; i++) { return i | i; } }
+assertEquals(10, f());
diff --git a/deps/v8/test/mjsunit/regress/regress-246.js b/deps/v8/test/mjsunit/regress/regress-246.js
index 4324b5404..09b746b7a 100644
--- a/deps/v8/test/mjsunit/regress/regress-246.js
+++ b/deps/v8/test/mjsunit/regress/regress-246.js
@@ -1,31 +1,31 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// See: http://code.google.com/p/v8/issues/detail?id=246
-
-assertTrue(/(?:text)/.test("text"));
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See: http://code.google.com/p/v8/issues/detail?id=246
+
+assertTrue(/(?:text)/.test("text"));
assertEquals(["text"], /(?:text)/.exec("text")); \ No newline at end of file
diff --git a/deps/v8/test/mjsunit/regress/regress-760-1.js b/deps/v8/test/mjsunit/regress/regress-760-1.js
new file mode 100644
index 000000000..2e0cee5f8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-760-1.js
@@ -0,0 +1,49 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check that when valueOf for a String object is overwritten, it is called
+// and its result is used when that object is added to a string.
+
+// See: http://code.google.com/p/v8/issues/detail?id=760
+
+String.prototype.valueOf = function() { return 'y' };
+
+function test() {
+ var o = Object('x');
+ assertEquals('y', o + '');
+ assertEquals('y', '' + o);
+}
+
+for (var i = 0; i < 10; i++) {
+ var o = Object('x');
+ assertEquals('y', o + '');
+ assertEquals('y', '' + o);
+}
+
+for (var i = 0; i < 10; i++) {
+ test()
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-760-2.js b/deps/v8/test/mjsunit/regress/regress-760-2.js
new file mode 100644
index 000000000..1b1cbfebe
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-760-2.js
@@ -0,0 +1,49 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check that when valueOf for a String object is overwritten, it is called
+// and its result is used when that object is added to a string.
+
+// See: http://code.google.com/p/v8/issues/detail?id=760
+
+function test() {
+ var o = Object('x');
+ o.valueOf = function() { return 'y' };
+ assertEquals('y', o + '');
+ assertEquals('y', '' + o);
+}
+
+for (var i = 0; i < 10; i++) {
+ var o = Object('x');
+ o.valueOf = function() { return 'y' };
+ assertEquals('y', o + '');
+ assertEquals('y', '' + o);
+}
+
+for (var i = 0; i < 10; i++) {
+ test()
+}
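
Both regress-760 tests rest on the ordinary ToPrimitive coercion performed
by +: for a String wrapper object, valueOf is consulted before toString, so
a replaced valueOf takes effect whether it sits on the prototype (760-1) or
on the instance (760-2). In reduced form (mjsunit asserts assumed):

    var o = Object('x');        // a String wrapper object
    assertEquals('x', o + '');  // the default valueOf yields the wrapped string
    o.valueOf = function() { return 'y'; };
    assertEquals('y', o + '');  // + calls valueOf before toString
    assertEquals('y', '' + o);
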
diff --git a/deps/v8/test/mjsunit/regress/regress-798.js b/deps/v8/test/mjsunit/regress/regress-798.js
new file mode 100644
index 000000000..423c8832a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-798.js
@@ -0,0 +1,109 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var x = {};
+
+// Add property a with getter/setter.
+x.__defineGetter__("a", function() {
+ try {
+ y.x = 40;
+ } catch (e) {
+ assertEquals(3, e.stack.split('\n').length);
+ }
+ return 40;
+});
+
+x.__defineSetter__("a", function(val) {
+ try {
+ y.x = 40;
+ } catch(e) {
+ assertEquals(3, e.stack.split('\n').length);
+ }
+});
+
+// Add property b with getter/setter.
+function getB() {
+ try {
+ y.x = 30;
+ } catch (e) {
+ assertEquals(3, e.stack.split('\n').length);
+ }
+ return 30;
+}
+
+function setB(val) {
+ try {
+ y.x = 30;
+ } catch(e) {
+ assertEquals(3, e.stack.split('\n').length);
+ }
+}
+
+x.__defineGetter__("b", getB);
+x.__defineSetter__("b", setB);
+
+// Add property c with getter/setter.
+var descriptor = {
+ get: function() {
+ try {
+ y.x = 40;
+ } catch (e) {
+ assertEquals(3, e.stack.split('\n').length);
+ }
+ return 40;
+ },
+ set: function(val) {
+ try {
+ y.x = 40;
+ } catch(e) {
+ assertEquals(3, e.stack.split('\n').length);
+ }
+ }
+};
+
+Object.defineProperty(x, 'c', descriptor);
+
+// Check that an exception thrown in a getter or setter produces the
+// expected stack height.
+x.a;
+x.b;
+x.c;
+x.a = 1;
+x.b = 1;
+x.c = 1;
+
+// Do the same with the getters/setters on a prototype object.
+var xx = {};
+xx.__proto__ = x;
+
+xx.a;
+xx.b;
+xx.c;
+xx.a = 1;
+xx.b = 1;
+xx.c = 1;
+
diff --git a/deps/v8/test/mjsunit/regress/regress-815.js b/deps/v8/test/mjsunit/regress/regress-815.js
new file mode 100644
index 000000000..803c0fb30
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-815.js
@@ -0,0 +1,49 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Issue 815 describes a situation in which the ARM code generator
+// could end up in a spilled scope while emitting code that only worked
+// in a register-allocated scope.  Test that this no longer happens.
+//
+// The code generated for unary + assumes that we are not in a spilled
+// scope.
+
+var o = new Object();
+
+// The code for the iterated-over object in for-in used to be emitted
+// in a spilled scope:
+for (x in +o) { }
+
+// Emitting code for the left-hand side of a for-in:
+for (a[+o] in o) {}
+
+// The receiver in an obj[index](1, 2, 3) call:
+try {
+ o[+o](1,2,3)
+} catch(e) {
+ // It's OK as long as it does not hit an assert.
+}
diff --git a/deps/v8/test/sputnik/sputnik.status b/deps/v8/test/sputnik/sputnik.status
index 13108c0fb..bc8c1e399 100644
--- a/deps/v8/test/sputnik/sputnik.status
+++ b/deps/v8/test/sputnik/sputnik.status
@@ -183,8 +183,8 @@ S8.5_A2.1: PASS, FAIL if $system == linux, FAIL if $system == macos
# These tests check for ES3 semantics, and differ from ES5.
# When we follow ES5 semantics, it's ok to fail the test.
-# Allow keywords as names of properties in object initialisers and
-# in dot-notation property access.
+# Allow keywords as names of properties in object initialisers and
+# in dot-notation property access.
S11.1.5_A4.1: FAIL_OK
S11.1.5_A4.2: FAIL_OK
diff --git a/deps/v8/tools/gc-nvp-trace-processor.py b/deps/v8/tools/gc-nvp-trace-processor.py
index 44aa0a25f..f1f9dc01c 100755
--- a/deps/v8/tools/gc-nvp-trace-processor.py
+++ b/deps/v8/tools/gc-nvp-trace-processor.py
@@ -38,7 +38,7 @@
from __future__ import with_statement
-import sys, types, re, subprocess
+import sys, types, re, subprocess, math
def flatten(l):
flat = []
@@ -262,48 +262,68 @@ plots = [
],
]
+def freduce(f, field, trace, init):
+ return reduce(lambda t,r: f(t, r[field]), trace, init)
+
def calc_total(trace, field):
- return reduce(lambda t,r: t + r[field], trace, 0)
+ return freduce(lambda t,v: t + v, field, trace, 0)
def calc_max(trace, field):
- return reduce(lambda t,r: max(t, r[field]), trace, 0)
+  return freduce(lambda t,v: max(t, v), field, trace, 0)
-def process_trace(filename):
- trace = parse_gc_trace(filename)
- total_gc = calc_total(trace, 'pause')
- max_gc = calc_max(trace, 'pause')
- avg_gc = total_gc / len(trace)
+def count_nonzero(trace, field):
+  return freduce(lambda t,v: t if v == 0 else t + 1, field, trace, 0)
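+
+# For example (editor's sketch, given a trace of dicts as produced by
+# parse_gc_trace):
+#   trace = [{'pause': 1}, {'pause': 3}, {'pause': 0}]
+#   calc_total(trace, 'pause')      # -> 4
+#   calc_max(trace, 'pause')        # -> 3
+#   count_nonzero(trace, 'pause')   # -> 2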
- total_sweep = calc_total(trace, 'sweep')
- max_sweep = calc_max(trace, 'sweep')
- total_mark = calc_total(trace, 'mark')
- max_mark = calc_max(trace, 'mark')
+def process_trace(filename):
+ trace = parse_gc_trace(filename)
+ marksweeps = filter(lambda r: r['gc'] == 'ms', trace)
+ markcompacts = filter(lambda r: r['gc'] == 'mc', trace)
scavenges = filter(lambda r: r['gc'] == 's', trace)
- total_scavenge = calc_total(scavenges, 'pause')
- max_scavenge = calc_max(scavenges, 'pause')
- avg_scavenge = total_scavenge / len(scavenges)
charts = plot_all(plots, trace, filename)
+ def stats(out, prefix, trace, field):
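+    # Emits one table row: count, total, max, and mean with the sample
+    # standard deviation (note the Bessel correction, n - 1).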
+    n = len(trace)
+    if n == 0:
+      return  # Nothing to report; also avoids dividing by zero below.
+ total = calc_total(trace, field)
+    max_value = calc_max(trace, field)
+ avg = total / n
+ if n > 1:
+ dev = math.sqrt(freduce(lambda t,r: (r - avg) ** 2, field, trace, 0) /
+ (n - 1))
+ else:
+ dev = 0
+
+    out.write('<tr><td>%s</td><td>%d</td><td>%d</td>'
+              '<td>%d</td><td>%d [dev %f]</td></tr>' %
+              (prefix, n, total, max_value, avg, dev))
+
+
with open(filename + '.html', 'w') as out:
out.write('<html><body>')
- out.write('<table><tr><td>')
- out.write('Total in GC: <b>%d</b><br/>' % total_gc)
- out.write('Max in GC: <b>%d</b><br/>' % max_gc)
- out.write('Avg in GC: <b>%d</b><br/>' % avg_gc)
- out.write('</td><td>')
- out.write('Total in Scavenge: <b>%d</b><br/>' % total_scavenge)
- out.write('Max in Scavenge: <b>%d</b><br/>' % max_scavenge)
- out.write('Avg in Scavenge: <b>%d</b><br/>' % avg_scavenge)
- out.write('</td><td>')
- out.write('Total in Sweep: <b>%d</b><br/>' % total_sweep)
- out.write('Max in Sweep: <b>%d</b><br/>' % max_sweep)
- out.write('</td><td>')
- out.write('Total in Mark: <b>%d</b><br/>' % total_mark)
- out.write('Max in Mark: <b>%d</b><br/>' % max_mark)
- out.write('</td></tr></table>')
+ out.write('<table>')
+ out.write('<tr><td>Phase</td><td>Count</td><td>Time (ms)</td><td>Max</td><td>Avg</td></tr>')
+ stats(out, 'Total in GC', trace, 'pause')
+ stats(out, 'Scavenge', scavenges, 'pause')
+ stats(out, 'MarkSweep', marksweeps, 'pause')
+ stats(out, 'MarkCompact', markcompacts, 'pause')
+ stats(out, 'Mark', filter(lambda r: r['mark'] != 0, trace), 'mark')
+ stats(out, 'Sweep', filter(lambda r: r['sweep'] != 0, trace), 'sweep')
+ stats(out, 'Flush Code', filter(lambda r: r['flushcode'] != 0, trace), 'flushcode')
+ stats(out, 'Compact', filter(lambda r: r['compact'] != 0, trace), 'compact')
+ out.write('</table>')
for chart in charts:
out.write('<img src="%s">' % chart)
out.write('</body></html>')
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index 839ae0bb0..47f95026d 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -108,8 +108,6 @@
'conditions': [
[ 'gcc_version==44', {
'cflags': [
- # Avoid gcc 4.4 strict aliasing issues in dtoa.c
- '-fno-strict-aliasing',
# Avoid crashes with gcc 4.4 in the v8 test suite.
'-fno-tree-vrp',
],
@@ -338,7 +336,6 @@
'../../src/execution.h',
'../../src/factory.cc',
'../../src/factory.h',
- '../../src/fast-codegen.h',
'../../src/fast-dtoa.cc',
'../../src/fast-dtoa.h',
'../../src/flag-definitions.h',
@@ -398,6 +395,8 @@
'../../src/natives.h',
'../../src/objects-debug.cc',
'../../src/objects-inl.h',
+ '../../src/objects-visiting.cc',
+ '../../src/objects-visiting.h',
'../../src/objects.cc',
'../../src/objects.h',
'../../src/oprofile-agent.h',
@@ -487,7 +486,6 @@
'../../src/arm',
],
'sources': [
- '../../src/fast-codegen.cc',
'../../src/jump-target-light.h',
'../../src/jump-target-light-inl.h',
'../../src/jump-target-light.cc',
@@ -504,7 +502,6 @@
'../../src/arm/cpu-arm.cc',
'../../src/arm/debug-arm.cc',
'../../src/arm/disasm-arm.cc',
- '../../src/arm/fast-codegen-arm.cc',
'../../src/arm/frames-arm.cc',
'../../src/arm/frames-arm.h',
'../../src/arm/full-codegen-arm.cc',
@@ -549,8 +546,6 @@
'../../src/ia32/cpu-ia32.cc',
'../../src/ia32/debug-ia32.cc',
'../../src/ia32/disasm-ia32.cc',
- '../../src/ia32/fast-codegen-ia32.cc',
- '../../src/ia32/fast-codegen-ia32.h',
'../../src/ia32/frames-ia32.cc',
'../../src/ia32/frames-ia32.h',
'../../src/ia32/full-codegen-ia32.cc',
@@ -571,7 +566,6 @@
'../../src/x64',
],
'sources': [
- '../../src/fast-codegen.cc',
'../../src/jump-target-heavy.h',
'../../src/jump-target-heavy-inl.h',
'../../src/jump-target-heavy.cc',
@@ -586,7 +580,6 @@
'../../src/x64/cpu-x64.cc',
'../../src/x64/debug-x64.cc',
'../../src/x64/disasm-x64.cc',
- '../../src/x64/fast-codegen-x64.cc',
'../../src/x64/frames-x64.cc',
'../../src/x64/frames-x64.h',
'../../src/x64/full-codegen-x64.cc',
diff --git a/deps/v8/tools/oom_dump/README b/deps/v8/tools/oom_dump/README
new file mode 100644
index 000000000..5adbf65a0
--- /dev/null
+++ b/deps/v8/tools/oom_dump/README
@@ -0,0 +1,44 @@
+oom_dump extracts useful information from Google Chrome OOM minidumps.
+
+To build oom_dump, you need a checkout of google-breakpad
+(http://code.google.com/p/google-breakpad/).
+
+First, build and install breakpad itself.  For instructions, check
+the google-breakpad project, but currently it's as easy as:
+
+ ./configure
+ make
+ sudo make install
+
+(One catch: breakpad installs its .so into /usr/local/lib, so you
+might need some additional tweaking to make it discoverable, for
+example, putting a soft link into the /usr/lib directory.)
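+
+For example (editor's sketch; the exact library name may differ):
+
+  sudo ln -s /usr/local/lib/libbreakpad.so /usr/lib/libbreakpad.so
+  sudo ldconfig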
+
+The next step is to build v8.  Note: on a 64-bit platform you should
+build the x64 version of v8, otherwise you will get a link error when
+building oom_dump.
+
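+For example (editor's sketch, assuming v8's scons build of this era):
+
+  cd <v8 working copy>
+  scons arch=x64 mode=release
+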
+The last step is to build oom_dump itself. The following command should work:
+
+ cd <v8 working copy>/tools/oom_dump
+ scons BREAKPAD_DIR=<path to google-breakpad working copy>
+
+(Additionally, you can control the v8 working copy directory through
+V8_DIR, but the default, ../.., should work just fine.)
+
+If everything goes fine, oom_dump <path to minidump> should print
+some useful information about the OOM crash.
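+
+For example (editor's sketch; the minidump file name is hypothetical):
+
+  oom_dump chrome-crash.dmp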
diff --git a/deps/v8/tools/oom_dump/SConstruct b/deps/v8/tools/oom_dump/SConstruct
new file mode 100644
index 000000000..f228c8907
--- /dev/null
+++ b/deps/v8/tools/oom_dump/SConstruct
@@ -0,0 +1,46 @@
+# Copyright 2010 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+vars = Variables('custom.py')
+vars.Add(PathVariable('BREAKPAD_DIR',
+ 'Path to checkout of google-breakpad project',
+ '~/google-breakpad',
+ PathVariable.PathIsDir))
+vars.Add(PathVariable('V8_DIR',
+ 'Path to checkout of v8 project',
+ '../..',
+ PathVariable.PathIsDir))
+
+env = Environment(variables = vars,
+ CPPPATH = ['${BREAKPAD_DIR}/src', '${V8_DIR}/src'],
+ LIBPATH = ['/usr/local/lib', '${V8_DIR}'])
+
+env.Program('oom_dump.cc', LIBS = ['breakpad', 'v8', 'pthread'])
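+
+# Editor's note: a typical invocation (the breakpad path is hypothetical):
+#
+#   scons BREAKPAD_DIR=~/src/google-breakpad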
diff --git a/deps/v8/tools/oom_dump/oom_dump.cc b/deps/v8/tools/oom_dump/oom_dump.cc
new file mode 100644
index 000000000..01f6005cb
--- /dev/null
+++ b/deps/v8/tools/oom_dump/oom_dump.cc
@@ -0,0 +1,295 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <algorithm>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <google_breakpad/processor/minidump.h>
+#include <processor/logging.h>
+
+#define ENABLE_DEBUGGER_SUPPORT
+
+#include <v8.h>
+
+namespace {
+
+using google_breakpad::Minidump;
+using google_breakpad::MinidumpContext;
+using google_breakpad::MinidumpThread;
+using google_breakpad::MinidumpThreadList;
+using google_breakpad::MinidumpException;
+using google_breakpad::MinidumpMemoryRegion;
+
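+// Maps a v8::internal::InstanceType value to its name; the table is
+// built lazily from the INSTANCE_TYPE_LIST macro on first use and
+// returns NULL for gaps in the enum.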
+const char* InstanceTypeToString(int type) {
+  static char const* names[v8::internal::LAST_TYPE + 1] = {0};
+ if (names[v8::internal::STRING_TYPE] == NULL) {
+ using namespace v8::internal;
+#define SET(type) names[type] = #type;
+ INSTANCE_TYPE_LIST(SET)
+#undef SET
+ }
+ return names[type];
+}
+
+
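+// Reads the 32-bit pointer stored at base + 4 * offset, then returns
+// the 32-bit value it points to (the minidump is from a 32-bit process).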
+u_int32_t ReadPointedValue(MinidumpMemoryRegion* region,
+ u_int64_t base,
+ int offset) {
+ u_int32_t ptr = 0;
+ CHECK(region->GetMemoryAtAddress(base + 4 * offset, &ptr));
+ u_int32_t value = 0;
+ CHECK(region->GetMemoryAtAddress(ptr, &value));
+ return value;
+}
+
+
+void ReadArray(MinidumpMemoryRegion* region,
+ u_int64_t array_ptr,
+ int size,
+ int* output) {
+ for (int i = 0; i < size; i++) {
+ u_int32_t value;
+ CHECK(region->GetMemoryAtAddress(array_ptr + 4 * i, &value));
+ output[i] = value;
+ }
+}
+
+
+void ReadArrayFrom(MinidumpMemoryRegion* region,
+                   u_int64_t base,
+                   int offset,
+                   int size,
+                   int* output) {
+ u_int32_t ptr = 0;
+ CHECK(region->GetMemoryAtAddress(base + 4 * offset, &ptr));
+ ReadArray(region, ptr, size, output);
+}
+
+
+double toM(int size) {
+ return size / (1024. * 1024.);
+}
+
+
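+// Comparator that orders indices by descending a_[i], so the largest
+// entries come first after sorting.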
+class IndirectSorter {
+ public:
+ explicit IndirectSorter(int* a) : a_(a) { }
+
+ bool operator() (int i0, int i1) {
+ return a_[i0] > a_[i1];
+ }
+
+ private:
+ int* a_;
+};
+
+void DumpHeapStats(const char *minidump_file) {
+ Minidump minidump(minidump_file);
+ CHECK(minidump.Read());
+
+ MinidumpException *exception = minidump.GetException();
+ CHECK(exception);
+
+ MinidumpContext* crash_context = exception->GetContext();
+ CHECK(crash_context);
+
+ u_int32_t exception_thread_id = 0;
+ CHECK(exception->GetThreadID(&exception_thread_id));
+
+ MinidumpThreadList* thread_list = minidump.GetThreadList();
+ CHECK(thread_list);
+
+ MinidumpThread* exception_thread =
+ thread_list->GetThreadByID(exception_thread_id);
+ CHECK(exception_thread);
+
+ const MDRawContextX86* contextX86 = crash_context->GetContextX86();
+ CHECK(contextX86);
+
+ const u_int32_t esp = contextX86->esp;
+
+ MinidumpMemoryRegion* memory_region = exception_thread->GetMemory();
+ CHECK(memory_region);
+
+ const u_int64_t last = memory_region->GetBase() + memory_region->GetSize();
+
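+  // Scan the exception thread's stack for a pointer to the HeapStats
+  // record, which V8 marks with the 0xdecade00 sentinel.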
+ u_int64_t heap_stats_addr = 0;
+ for (u_int64_t addr = esp; addr < last; addr += 4) {
+ u_int32_t value = 0;
+ CHECK(memory_region->GetMemoryAtAddress(addr, &value));
+ if (value >= esp && value < last) {
+ u_int32_t value2 = 0;
+ CHECK(memory_region->GetMemoryAtAddress(value, &value2));
+ if (value2 == 0xdecade00) {
+ heap_stats_addr = addr;
+ break;
+ }
+ }
+ }
+ CHECK(heap_stats_addr);
+
+ // Read heap stats.
+
+#define READ_FIELD(offset) \
+ ReadPointedValue(memory_region, heap_stats_addr, offset)
+
+ CHECK(READ_FIELD(0) == 0xdecade00);
+ CHECK(READ_FIELD(23) == 0xdecade01);
+
+ const int new_space_size = READ_FIELD(1);
+ const int new_space_capacity = READ_FIELD(2);
+ const int old_pointer_space_size = READ_FIELD(3);
+ const int old_pointer_space_capacity = READ_FIELD(4);
+ const int old_data_space_size = READ_FIELD(5);
+ const int old_data_space_capacity = READ_FIELD(6);
+ const int code_space_size = READ_FIELD(7);
+ const int code_space_capacity = READ_FIELD(8);
+ const int map_space_size = READ_FIELD(9);
+ const int map_space_capacity = READ_FIELD(10);
+ const int cell_space_size = READ_FIELD(11);
+ const int cell_space_capacity = READ_FIELD(12);
+ const int lo_space_size = READ_FIELD(13);
+ const int global_handle_count = READ_FIELD(14);
+ const int weak_global_handle_count = READ_FIELD(15);
+ const int pending_global_handle_count = READ_FIELD(16);
+ const int near_death_global_handle_count = READ_FIELD(17);
+ const int destroyed_global_handle_count = READ_FIELD(18);
+ const int memory_allocator_size = READ_FIELD(19);
+ const int memory_allocator_capacity = READ_FIELD(20);
+#undef READ_FIELD
+
+ int objects_per_type[v8::internal::LAST_TYPE + 1] = {0};
+ ReadArrayFrom(memory_region, heap_stats_addr, 21,
+ v8::internal::LAST_TYPE + 1, objects_per_type);
+
+ int size_per_type[v8::internal::LAST_TYPE + 1] = {0};
+ ReadArrayFrom(memory_region, heap_stats_addr, 22, v8::internal::LAST_TYPE + 1,
+ size_per_type);
+
+ int js_global_objects =
+ objects_per_type[v8::internal::JS_GLOBAL_OBJECT_TYPE];
+ int js_builtins_objects =
+ objects_per_type[v8::internal::JS_BUILTINS_OBJECT_TYPE];
+ int js_global_proxies =
+ objects_per_type[v8::internal::JS_GLOBAL_PROXY_TYPE];
+
+ int indices[v8::internal::LAST_TYPE + 1];
+ for (int i = 0; i <= v8::internal::LAST_TYPE; i++) {
+ indices[i] = i;
+ }
+
+ std::stable_sort(indices, indices + sizeof(indices)/sizeof(indices[0]),
+ IndirectSorter(size_per_type));
+
+ int total_size = 0;
+ for (int i = 0; i <= v8::internal::LAST_TYPE; i++) {
+ total_size += size_per_type[i];
+ }
+
+ // Print heap stats.
+
+ printf("exception thread ID: %d (%x)\n",
+ exception_thread_id, exception_thread_id);
+ printf("heap stats address: %p\n", (void*)heap_stats_addr);
+#define PRINT_INT_STAT(stat) \
+ printf("\t%-25s\t% 10d\n", #stat ":", stat);
+#define PRINT_MB_STAT(stat) \
+ printf("\t%-25s\t% 10.3f MB\n", #stat ":", toM(stat));
+ PRINT_MB_STAT(new_space_size);
+ PRINT_MB_STAT(new_space_capacity);
+ PRINT_MB_STAT(old_pointer_space_size);
+ PRINT_MB_STAT(old_pointer_space_capacity);
+ PRINT_MB_STAT(old_data_space_size);
+ PRINT_MB_STAT(old_data_space_capacity);
+ PRINT_MB_STAT(code_space_size);
+ PRINT_MB_STAT(code_space_capacity);
+ PRINT_MB_STAT(map_space_size);
+ PRINT_MB_STAT(map_space_capacity);
+ PRINT_MB_STAT(cell_space_size);
+ PRINT_MB_STAT(cell_space_capacity);
+ PRINT_MB_STAT(lo_space_size);
+ PRINT_INT_STAT(global_handle_count);
+ PRINT_INT_STAT(weak_global_handle_count);
+ PRINT_INT_STAT(pending_global_handle_count);
+ PRINT_INT_STAT(near_death_global_handle_count);
+ PRINT_INT_STAT(destroyed_global_handle_count);
+ PRINT_MB_STAT(memory_allocator_size);
+ PRINT_MB_STAT(memory_allocator_capacity);
+#undef PRINT_INT_STAT
+#undef PRINT_MB_STAT
+
+ printf("\n");
+
+ printf(
+ "\tJS_GLOBAL_OBJECT_TYPE/JS_BUILTINS_OBJECT_TYPE/JS_GLOBAL_PROXY_TYPE: "
+ "%d/%d/%d\n\n",
+ js_global_objects, js_builtins_objects, js_global_proxies);
+
+ int running_size = 0;
+ for (int i = 0; i <= v8::internal::LAST_TYPE; i++) {
+ int type = indices[i];
+ const char* name = InstanceTypeToString(type);
+ if (name == NULL) {
+      // Unknown instance type.  Check that there are no objects of that type.
+ CHECK(objects_per_type[type] == 0);
+ CHECK(size_per_type[type] == 0);
+ continue;
+ }
+ int size = size_per_type[type];
+ running_size += size;
+ printf("\t%-37s% 9d% 11.3f MB% 10.3f%%% 10.3f%%\n",
+ name, objects_per_type[type], toM(size),
+ 100.*size/total_size, 100.*running_size/total_size);
+ }
+ printf("\t%-37s% 9d% 11.3f MB% 10.3f%%% 10.3f%%\n",
+ "total", 0, toM(total_size), 100., 100.);
+}
+
+} // namespace
+
+int main(int argc, char **argv) {
+ BPLOG_INIT(&argc, &argv);
+
+ if (argc != 2) {
+ fprintf(stderr, "usage: %s <minidump>\n", argv[0]);
+ return 1;
+ }
+
+ DumpHeapStats(argv[1]);
+
+ return 0;
+}
diff --git a/deps/v8/tools/v8.xcodeproj/project.pbxproj b/deps/v8/tools/v8.xcodeproj/project.pbxproj
index b28945449..0ca6a9ddf 100644
--- a/deps/v8/tools/v8.xcodeproj/project.pbxproj
+++ b/deps/v8/tools/v8.xcodeproj/project.pbxproj
@@ -240,15 +240,14 @@
9FA38BCF1175B30400C4CD55 /* full-codegen-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BCB1175B30400C4CD55 /* full-codegen-arm.cc */; };
9FA38BD01175B30400C4CD55 /* jump-target-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BCC1175B30400C4CD55 /* jump-target-arm.cc */; };
9FA38BD11175B30400C4CD55 /* virtual-frame-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BCD1175B30400C4CD55 /* virtual-frame-arm.cc */; };
- 9FBE03DF10BD409900F8BFBA /* fast-codegen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FBE03DC10BD409900F8BFBA /* fast-codegen.cc */; };
- 9FBE03E210BD40EA00F8BFBA /* fast-codegen-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FBE03E110BD40EA00F8BFBA /* fast-codegen-ia32.cc */; };
- 9FBE03E510BD412600F8BFBA /* fast-codegen-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FBE03E410BD412600F8BFBA /* fast-codegen-arm.cc */; };
9FC86ABD0F5FEDAC00F22668 /* oprofile-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */; };
9FC86ABE0F5FEDAC00F22668 /* oprofile-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */; };
C2BD4BD7120165460046BF9F /* dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2BD4BD5120165460046BF9F /* dtoa.cc */; };
C2BD4BDB120165A70046BF9F /* fixed-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2BD4BD9120165A70046BF9F /* fixed-dtoa.cc */; };
C2BD4BE4120166180046BF9F /* fixed-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2BD4BD9120165A70046BF9F /* fixed-dtoa.cc */; };
C2BD4BE51201661F0046BF9F /* dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2BD4BD5120165460046BF9F /* dtoa.cc */; };
+ C2D1E9731212F2BC00187A52 /* objects-visiting.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2D1E9711212F27B00187A52 /* objects-visiting.cc */; };
+ C2D1E9741212F2CF00187A52 /* objects-visiting.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2D1E9711212F27B00187A52 /* objects-visiting.cc */; };
/* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */
@@ -613,17 +612,12 @@
9FA38BB01175B2D200C4CD55 /* virtual-frame-heavy-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "virtual-frame-heavy-inl.h"; sourceTree = "<group>"; };
9FA38BB11175B2D200C4CD55 /* virtual-frame-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "virtual-frame-inl.h"; sourceTree = "<group>"; };
9FA38BB21175B2D200C4CD55 /* virtual-frame-light-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "virtual-frame-light-inl.h"; sourceTree = "<group>"; };
- 9FA38BC11175B2E500C4CD55 /* fast-codegen-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "fast-codegen-ia32.h"; path = "ia32/fast-codegen-ia32.h"; sourceTree = "<group>"; };
9FA38BC21175B2E500C4CD55 /* full-codegen-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "full-codegen-ia32.cc"; path = "ia32/full-codegen-ia32.cc"; sourceTree = "<group>"; };
9FA38BC31175B2E500C4CD55 /* jump-target-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "jump-target-ia32.cc"; path = "ia32/jump-target-ia32.cc"; sourceTree = "<group>"; };
9FA38BC41175B2E500C4CD55 /* virtual-frame-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "virtual-frame-ia32.cc"; path = "ia32/virtual-frame-ia32.cc"; sourceTree = "<group>"; };
9FA38BCB1175B30400C4CD55 /* full-codegen-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "full-codegen-arm.cc"; path = "arm/full-codegen-arm.cc"; sourceTree = "<group>"; };
9FA38BCC1175B30400C4CD55 /* jump-target-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "jump-target-arm.cc"; path = "arm/jump-target-arm.cc"; sourceTree = "<group>"; };
9FA38BCD1175B30400C4CD55 /* virtual-frame-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "virtual-frame-arm.cc"; path = "arm/virtual-frame-arm.cc"; sourceTree = "<group>"; };
- 9FBE03DC10BD409900F8BFBA /* fast-codegen.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "fast-codegen.cc"; sourceTree = "<group>"; };
- 9FBE03DD10BD409900F8BFBA /* fast-codegen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "fast-codegen.h"; sourceTree = "<group>"; };
- 9FBE03E110BD40EA00F8BFBA /* fast-codegen-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "fast-codegen-ia32.cc"; path = "ia32/fast-codegen-ia32.cc"; sourceTree = "<group>"; };
- 9FBE03E410BD412600F8BFBA /* fast-codegen-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "fast-codegen-arm.cc"; path = "arm/fast-codegen-arm.cc"; sourceTree = "<group>"; };
9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "oprofile-agent.cc"; sourceTree = "<group>"; };
9FC86ABC0F5FEDAC00F22668 /* oprofile-agent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "oprofile-agent.h"; sourceTree = "<group>"; };
9FF7A28211A642EA0051B8F2 /* unbound-queue-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "unbound-queue-inl.h"; sourceTree = "<group>"; };
@@ -632,6 +626,8 @@
C2BD4BD6120165460046BF9F /* dtoa.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dtoa.h; sourceTree = "<group>"; };
C2BD4BD9120165A70046BF9F /* fixed-dtoa.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "fixed-dtoa.cc"; sourceTree = "<group>"; };
C2BD4BDA120165A70046BF9F /* fixed-dtoa.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "fixed-dtoa.h"; sourceTree = "<group>"; };
+ C2D1E9711212F27B00187A52 /* objects-visiting.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "objects-visiting.cc"; sourceTree = "<group>"; };
+ C2D1E9721212F27B00187A52 /* objects-visiting.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "objects-visiting.h"; sourceTree = "<group>"; };
/* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */
@@ -726,6 +722,8 @@
897FF0F80E719B8F00D62E90 /* allocation.cc */,
897FF0F90E719B8F00D62E90 /* allocation.h */,
897FF0FA0E719B8F00D62E90 /* api.cc */,
+ C2D1E9711212F27B00187A52 /* objects-visiting.cc */,
+ C2D1E9721212F27B00187A52 /* objects-visiting.h */,
897FF0FB0E719B8F00D62E90 /* api.h */,
893986D40F29020C007D5254 /* apiutils.h */,
897FF0FC0E719B8F00D62E90 /* arguments.h */,
@@ -813,11 +811,6 @@
897FF1310E719B8F00D62E90 /* execution.h */,
897FF1320E719B8F00D62E90 /* factory.cc */,
897FF1330E719B8F00D62E90 /* factory.h */,
- 9FBE03E410BD412600F8BFBA /* fast-codegen-arm.cc */,
- 9FBE03E110BD40EA00F8BFBA /* fast-codegen-ia32.cc */,
- 9FA38BC11175B2E500C4CD55 /* fast-codegen-ia32.h */,
- 9FBE03DC10BD409900F8BFBA /* fast-codegen.cc */,
- 9FBE03DD10BD409900F8BFBA /* fast-codegen.h */,
9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */,
9FA38BA21175B2D200C4CD55 /* fast-dtoa.h */,
89471C7F0EB23EE400B6874B /* flag-definitions.h */,
@@ -1268,6 +1261,7 @@
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
+ C2D1E9731212F2BC00187A52 /* objects-visiting.cc in Sources */,
89A88DEC0E71A5FF0043BA31 /* accessors.cc in Sources */,
89A88DED0E71A6000043BA31 /* allocation.cc in Sources */,
89A88DEE0E71A6010043BA31 /* api.cc in Sources */,
@@ -1302,7 +1296,6 @@
89A88E020E71A65A0043BA31 /* dtoa-config.c in Sources */,
89A88E030E71A65B0043BA31 /* execution.cc in Sources */,
89A88E040E71A65D0043BA31 /* factory.cc in Sources */,
- 9FBE03E210BD40EA00F8BFBA /* fast-codegen-ia32.cc in Sources */,
9FA38BBC1175B2D200C4CD55 /* fast-dtoa.cc in Sources */,
89A88E050E71A65D0043BA31 /* flags.cc in Sources */,
9FA38BBD1175B2D200C4CD55 /* flow-graph.cc in Sources */,
@@ -1391,6 +1384,7 @@
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
+ C2D1E9741212F2CF00187A52 /* objects-visiting.cc in Sources */,
89F23C3F0E78D5B2006B2466 /* accessors.cc in Sources */,
89F23C400E78D5B2006B2466 /* allocation.cc in Sources */,
89F23C410E78D5B2006B2466 /* api.cc in Sources */,
@@ -1426,8 +1420,6 @@
89F23C550E78D5B2006B2466 /* dtoa-config.c in Sources */,
89F23C560E78D5B2006B2466 /* execution.cc in Sources */,
89F23C570E78D5B2006B2466 /* factory.cc in Sources */,
- 9FBE03E510BD412600F8BFBA /* fast-codegen-arm.cc in Sources */,
- 9FBE03DF10BD409900F8BFBA /* fast-codegen.cc in Sources */,
9FA38BB51175B2D200C4CD55 /* fast-dtoa.cc in Sources */,
89F23C580E78D5B2006B2466 /* flags.cc in Sources */,
9FA38BB61175B2D200C4CD55 /* flow-graph.cc in Sources */,
diff --git a/deps/v8/tools/visual_studio/v8_base.vcproj b/deps/v8/tools/visual_studio/v8_base.vcproj
index 2571b6525..ef0877349 100644
--- a/deps/v8/tools/visual_studio/v8_base.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base.vcproj
@@ -464,18 +464,6 @@
RelativePath="..\..\src\factory.h"
>
</File>
- <File
- RelativePath="..\..\src\ia32\fast-codegen-ia32.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\ia32\fast-codegen-ia32.h"
- >
- </File>
- <File
- RelativePath="..\..\src\fast-codegen.h"
- >
- </File>
<File
RelativePath="..\..\src\fast-dtoa.cc"
>
@@ -753,6 +741,14 @@
>
</File>
<File
+ RelativePath="..\..\src\objects-visiting.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\objects-visiting.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\objects.cc"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_base_arm.vcproj b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
index a3c597099..aa1e8229e 100644
--- a/deps/v8/tools/visual_studio/v8_base_arm.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
@@ -432,18 +432,6 @@
RelativePath="..\..\src\factory.h"
>
</File>
- <File
- RelativePath="..\..\src\arm\fast-codegen-arm.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\fast-codegen.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\fast-codegen.h"
- >
- </File>
<File
RelativePath="..\..\src\flags.cc"
>
@@ -713,6 +701,14 @@
>
</File>
<File
+ RelativePath="..\..\src\objects-visiting.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\objects-visiting.h"
+      >
+    </File>
+    <File
RelativePath="..\..\src\objects.cc"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_base_x64.vcproj b/deps/v8/tools/visual_studio/v8_base_x64.vcproj
index 708b380c9..33c53940b 100644
--- a/deps/v8/tools/visual_studio/v8_base_x64.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_x64.vcproj
@@ -425,18 +425,6 @@
>
</File>
<File
- RelativePath="..\..\src\x64\fast-codegen-x64.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\fast-codegen.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\fast-codegen.h"
- >
- </File>
- <File
RelativePath="..\..\src\flags.cc"
>
</File>
@@ -706,6 +694,14 @@
>
</File>
<File
+ RelativePath="..\..\src\objects-visiting.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\objects-visiting.h"
+      >
+    </File>
+    <File
RelativePath="..\..\src\objects.cc"
>
</File>