summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRyan Dahl <ry@tinyclouds.org>2011-07-05 11:41:56 -0700
committerRyan Dahl <ry@tinyclouds.org>2011-07-05 11:41:56 -0700
commit6054dcc1302d1a1ef51e215dcf1ad16fb967f1ba (patch)
treebfdb8d58cdbe55de1e1e05a8331b0a1623f9aaa5
parent33af2720f26c2b25bc7f75ce7eb454ff99db6d35 (diff)
downloadnode-6054dcc1302d1a1ef51e215dcf1ad16fb967f1ba.tar.gz
Upgrade V8 to 3.4.9
-rw-r--r--deps/v8/AUTHORS1
-rw-r--r--deps/v8/ChangeLog61
-rw-r--r--deps/v8/SConstruct39
-rw-r--r--deps/v8/include/v8.h46
-rw-r--r--deps/v8/samples/shell.cc9
-rw-r--r--deps/v8/src/api.cc33
-rw-r--r--deps/v8/src/arm/assembler-arm.h10
-rw-r--r--deps/v8/src/arm/builtins-arm.cc3
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc26
-rw-r--r--deps/v8/src/arm/code-stubs-arm.h16
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc30
-rw-r--r--deps/v8/src/arm/full-codegen-arm.cc201
-rw-r--r--deps/v8/src/arm/ic-arm.cc3
-rw-r--r--deps/v8/src/arm/lithium-arm.cc199
-rw-r--r--deps/v8/src/arm/lithium-arm.h188
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.cc236
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc115
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h22
-rw-r--r--deps/v8/src/array.js15
-rw-r--r--deps/v8/src/ast.h14
-rw-r--r--deps/v8/src/compilation-cache.cc49
-rw-r--r--deps/v8/src/compilation-cache.h10
-rwxr-xr-xdeps/v8/src/compiler.cc14
-rw-r--r--deps/v8/src/contexts.h2
-rw-r--r--deps/v8/src/date.js21
-rw-r--r--deps/v8/src/dateparser-inl.h281
-rw-r--r--deps/v8/src/dateparser.cc42
-rw-r--r--deps/v8/src/dateparser.h198
-rw-r--r--deps/v8/src/debug-debugger.js15
-rw-r--r--deps/v8/src/deoptimizer.cc190
-rw-r--r--deps/v8/src/deoptimizer.h96
-rw-r--r--deps/v8/src/flag-definitions.h3
-rw-r--r--deps/v8/src/frames.cc37
-rw-r--r--deps/v8/src/frames.h8
-rw-r--r--deps/v8/src/full-codegen.cc8
-rw-r--r--deps/v8/src/full-codegen.h16
-rw-r--r--deps/v8/src/gdb-jit.cc710
-rw-r--r--deps/v8/src/gdb-jit.h8
-rw-r--r--deps/v8/src/handles.cc7
-rw-r--r--deps/v8/src/handles.h2
-rw-r--r--deps/v8/src/heap.cc4
-rw-r--r--deps/v8/src/hydrogen-instructions.cc41
-rw-r--r--deps/v8/src/hydrogen-instructions.h287
-rw-r--r--deps/v8/src/hydrogen.cc423
-rw-r--r--deps/v8/src/hydrogen.h17
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h6
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc11
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.h14
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc29
-rw-r--r--deps/v8/src/ia32/full-codegen-ia32.cc208
-rw-r--r--deps/v8/src/ia32/ic-ia32.cc2
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.cc257
-rw-r--r--deps/v8/src/ia32/lithium-ia32.cc222
-rw-r--r--deps/v8/src/ia32/lithium-ia32.h187
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc11
-rw-r--r--deps/v8/src/ic.cc79
-rw-r--r--deps/v8/src/ic.h6
-rw-r--r--deps/v8/src/isolate.cc5
-rw-r--r--deps/v8/src/isolate.h4
-rw-r--r--deps/v8/src/log.cc24
-rw-r--r--deps/v8/src/log.h6
-rw-r--r--deps/v8/src/mark-compact.cc57
-rw-r--r--deps/v8/src/mark-compact.h4
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc13
-rw-r--r--deps/v8/src/mips/code-stubs-mips.h16
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc5
-rw-r--r--deps/v8/src/mips/full-codegen-mips.cc237
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc21
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h4
-rw-r--r--deps/v8/src/mirror-debugger.js35
-rw-r--r--deps/v8/src/objects.cc50
-rw-r--r--deps/v8/src/objects.h9
-rw-r--r--deps/v8/src/parser.cc126
-rw-r--r--deps/v8/src/parser.h6
-rw-r--r--deps/v8/src/platform-solaris.cc232
-rw-r--r--deps/v8/src/prettyprinter.cc9
-rw-r--r--deps/v8/src/profile-generator.cc108
-rw-r--r--deps/v8/src/profile-generator.h2
-rw-r--r--deps/v8/src/rewriter.cc2
-rw-r--r--deps/v8/src/runtime-profiler.cc143
-rw-r--r--deps/v8/src/runtime-profiler.h23
-rw-r--r--deps/v8/src/runtime.cc236
-rw-r--r--deps/v8/src/runtime.h25
-rw-r--r--deps/v8/src/scopes.cc150
-rw-r--r--deps/v8/src/scopes.h31
-rw-r--r--deps/v8/src/string.js11
-rw-r--r--deps/v8/src/stub-cache.cc4
-rw-r--r--deps/v8/src/type-info.cc1
-rw-r--r--deps/v8/src/v8-counters.h4
-rw-r--r--deps/v8/src/v8.cc44
-rw-r--r--deps/v8/src/version.cc2
-rw-r--r--deps/v8/src/x64/assembler-x64.h6
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc13
-rw-r--r--deps/v8/src/x64/code-stubs-x64.h16
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc32
-rw-r--r--deps/v8/src/x64/full-codegen-x64.cc199
-rw-r--r--deps/v8/src/x64/ic-x64.cc2
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.cc260
-rw-r--r--deps/v8/src/x64/lithium-x64.cc199
-rw-r--r--deps/v8/src/x64/lithium-x64.h190
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc11
-rw-r--r--deps/v8/test/cctest/test-api.cc25
-rw-r--r--deps/v8/test/cctest/test-compiler.cc13
-rw-r--r--deps/v8/test/cctest/test-log.cc94
-rw-r--r--deps/v8/test/mjsunit/assert-opt-and-deopt.js2
-rw-r--r--deps/v8/test/mjsunit/date-parse.js6
-rw-r--r--deps/v8/test/mjsunit/date.js120
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js132
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js119
-rw-r--r--deps/v8/test/mjsunit/element-kind.js102
-rw-r--r--deps/v8/test/mjsunit/fuzz-natives.js3
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1360.js39
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1513.js44
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1521.js47
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1528.js40
-rw-r--r--deps/v8/test/sputnik/README4
-rw-r--r--deps/v8/test/sputnik/sputnik.status18
-rw-r--r--deps/v8/tools/gdb-v8-support.py154
-rwxr-xr-xdeps/v8/tools/grokdump.py116
-rwxr-xr-xdeps/v8/tools/ll_prof.py6
121 files changed, 4575 insertions, 3880 deletions
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index d2dab733d..d4e35fe80 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -36,6 +36,7 @@ Patrick Gansterer <paroga@paroga.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Rafal Krypa <rafal@krypa.net>
Rene Rebe <rene@exactcode.de>
+Robert Mustacchi <rm@fingolfin.org>
Rodolph Perfetta <rodolph.perfetta@arm.com>
Ryan Dahl <coldredlemur@gmail.com>
Sanjoy Das <sanjoy@playingwithpointers.com>
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index a5b035881..f936c7a0d 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,18 @@
+2011-07-04: Version 3.4.9
+
+ Added support for debugger inspection of locals in optimized frames
+ (issue 1140).
+
+ Fixed SConstruct to pass correct defines to samples/preparser when
+ building with library=shared.
+
+ Made date parser handle ES5 Date Time Strings correctly (issue 1498).
+
+ Fixed a bug in Object.defineProperty on the arguments object.
+
+ Performance improvements on all platforms.
+
+
2011-06-29: Version 3.4.8
Ensure 16-byte stack alignment on Solaris (issue 1505).
@@ -54,41 +69,41 @@
2011-06-15: Version 3.4.4
- Added snapshot compression support and --stress-opt flag to d8.
+ Added snapshot compression support and --stress-opt flag to d8.
- Improved performance of try/catch.
+ Improved performance of try/catch.
- Several GYP-related changes: Added support for building Xcode project
- files. Make the ARM simulator build with GYP again. Generate Makefiles
- for all architectures on Linux.
+ Several GYP-related changes: Added support for building Xcode project
+ files. Make the ARM simulator build with GYP again. Generate Makefiles
+ for all architectures on Linux.
- Fixed Array.prototype.{reduce,reduceRight} to pass undefined as the
- receiver for strict mode callbacks. (issue 1436)
+ Fixed Array.prototype.{reduce,reduceRight} to pass undefined as the
+ receiver for strict mode callbacks. (issue 1436)
- Fixed a bug where an array load was incorrectly hoisted by GVN.
+ Fixed a bug where an array load was incorrectly hoisted by GVN.
- Handle 'undefined' correctly when === has been specialized for doubles.
- (issue 1434)
+ Handle 'undefined' correctly when === has been specialized for doubles.
+ (issue 1434)
- Corrected the limit of local variables in an optimized function from 64
- to 63.
+ Corrected the limit of local variables in an optimized function from 64
+ to 63.
- Correctly set ReadOnly flag on indexed properties when using the API Set
- method. (issue 1470)
+ Correctly set ReadOnly flag on indexed properties when using the API Set
+ method. (issue 1470)
- Give the correct error message when Object.isExtensible is called on a
- non-object. (issue 1452)
+ Give the correct error message when Object.isExtensible is called on a
+ non-object. (issue 1452)
- Added GetOwnPropertyNames method for Object in the API. Patch by Peter
- Varga.
+ Added GetOwnPropertyNames method for Object in the API. Patch by Peter
+ Varga.
- Do not redefine properties unneccesarily in seal and freeze. (issue
- 1447)
+ Do not redefine properties unneccesarily in seal and freeze. (issue
+ 1447)
- IsExecutionTerminating has an Isolate parameter now.
+ IsExecutionTerminating has an Isolate parameter now.
- Distinguish keyed loads with a symbol key from fast elements loads,
- avoiding some useless deoptimizations. (issue 1471)
+ Distinguish keyed loads with a symbol key from fast elements loads,
+ avoiding some useless deoptimizations. (issue 1471)
2011-06-08: Version 3.4.3
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index a51e7c8ef..7ee9f136a 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -89,7 +89,7 @@ LIBRARY_FLAGS = {
'gcc': {
'all': {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
- 'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
+ 'CXXFLAGS': ['-fno-rtti', '-fno-exceptions'],
},
'visibility:hidden': {
# Use visibility=default to disable this.
@@ -230,7 +230,7 @@ LIBRARY_FLAGS = {
'msvc': {
'all': {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
- 'CXXFLAGS': ['$CCFLAGS', '/GR-', '/Gy'],
+ 'CXXFLAGS': ['/GR-', '/Gy'],
'CPPDEFINES': ['WIN32'],
'LINKFLAGS': ['/INCREMENTAL:NO', '/NXCOMPAT', '/IGNORE:4221'],
'CCPDBFLAGS': ['/Zi']
@@ -400,12 +400,15 @@ DTOA_EXTRA_FLAGS = {
CCTEST_EXTRA_FLAGS = {
'all': {
'CPPPATH': [join(root_dir, 'src')],
+ 'library:shared': {
+ 'CPPDEFINES': ['USING_V8_SHARED']
+ },
},
'gcc': {
'all': {
'LIBPATH': [abspath('.')],
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
- 'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
+ 'CXXFLAGS': ['-fno-rtti', '-fno-exceptions'],
'LINKFLAGS': ['$CCFLAGS'],
},
'os:linux': {
@@ -436,9 +439,6 @@ CCTEST_EXTRA_FLAGS = {
'CPPDEFINES': ['_HAS_EXCEPTIONS=0'],
'LIBS': ['winmm', 'ws2_32']
},
- 'library:shared': {
- 'CPPDEFINES': ['USING_V8_SHARED']
- },
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
},
@@ -453,12 +453,15 @@ CCTEST_EXTRA_FLAGS = {
SAMPLE_FLAGS = {
'all': {
'CPPPATH': [join(abspath('.'), 'include')],
+ 'library:shared': {
+ 'CPPDEFINES': ['USING_V8_SHARED']
+ },
},
'gcc': {
'all': {
'LIBPATH': ['.'],
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
- 'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
+ 'CXXFLAGS': ['-fno-rtti', '-fno-exceptions'],
'LINKFLAGS': ['$CCFLAGS'],
},
'os:linux': {
@@ -472,6 +475,9 @@ SAMPLE_FLAGS = {
'LIBS': ['execinfo', 'pthread']
},
'os:solaris': {
+ # On Solaris, to get isinf, INFINITY, fpclassify and other macros one
+ # needs to define __C99FEATURES__.
+ 'CPPDEFINES': ['__C99FEATURES__'],
'LIBPATH' : ['/usr/local/lib'],
'LIBS': ['m', 'pthread', 'socket', 'nsl', 'rt'],
'LINKFLAGS': ['-mt']
@@ -572,9 +578,6 @@ SAMPLE_FLAGS = {
'verbose:on': {
'LINKFLAGS': ['/VERBOSE']
},
- 'library:shared': {
- 'CPPDEFINES': ['USING_V8_SHARED']
- },
'prof:on': {
'LINKFLAGS': ['/MAP']
},
@@ -625,13 +628,16 @@ SAMPLE_FLAGS = {
PREPARSER_FLAGS = {
'all': {
- 'CPPPATH': [join(abspath('.'), 'include'), join(abspath('.'), 'src')]
+ 'CPPPATH': [join(abspath('.'), 'include'), join(abspath('.'), 'src')],
+ 'library:shared': {
+ 'CPPDEFINES': ['USING_V8_SHARED']
+ },
},
'gcc': {
'all': {
'LIBPATH': ['.'],
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
- 'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
+ 'CXXFLAGS': ['-fno-rtti', '-fno-exceptions'],
'LINKFLAGS': ['$CCFLAGS'],
},
'os:win32': {
@@ -727,9 +733,6 @@ PREPARSER_FLAGS = {
'verbose:on': {
'LINKFLAGS': ['/VERBOSE']
},
- 'library:shared': {
- 'CPPDEFINES': ['USING_V8_SHARED']
- },
'prof:on': {
'LINKFLAGS': ['/MAP']
},
@@ -782,7 +785,7 @@ D8_FLAGS = {
'gcc': {
'all': {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
- 'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
+ 'CXXFLAGS': ['-fno-rtti', '-fno-exceptions'],
'LINKFLAGS': ['$CCFLAGS'],
},
'console:readline': {
@@ -1155,8 +1158,8 @@ def VerifyOptions(env):
return False
if env['os'] == 'win32' and env['library'] == 'shared' and env['prof'] == 'on':
Abort("Profiling on windows only supported for static library.")
- if env['gdbjit'] == 'on' and (env['os'] != 'linux' or (env['arch'] != 'ia32' and env['arch'] != 'x64' and env['arch'] != 'arm')):
- Abort("GDBJIT interface is supported only for Intel-compatible (ia32 or x64) Linux target.")
+ if env['gdbjit'] == 'on' and ((env['os'] != 'linux' and env['os'] != 'macos') or (env['arch'] != 'ia32' and env['arch'] != 'x64' and env['arch'] != 'arm')):
+ Abort("GDBJIT interface is supported only for Intel-compatible (ia32 or x64) Linux/OSX target.")
if env['os'] == 'win32' and env['soname'] == 'on':
Abort("Shared Object soname not applicable for Windows.")
if env['soname'] == 'on' and env['library'] == 'static':
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index e4ac1a3e1..fb10c7157 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -2558,18 +2558,6 @@ typedef void (*GCCallback)();
/**
- * Profiler modules.
- *
- * In V8, profiler consists of several modules. Each can be turned on / off
- * independently.
- */
-enum ProfilerModules {
- PROFILER_MODULE_NONE = 0,
- PROFILER_MODULE_CPU = 1
-};
-
-
-/**
* Collection of V8 heap information.
*
* Instances of this class can be passed to v8::V8::HeapStatistics to
@@ -2996,40 +2984,6 @@ class V8EXPORT V8 {
static bool IsProfilerPaused();
/**
- * Resumes specified profiler modules. Can be called several times to
- * mark the opening of a profiler events block with the given tag.
- *
- * "ResumeProfiler" is equivalent to "ResumeProfilerEx(PROFILER_MODULE_CPU)".
- * See ProfilerModules enum.
- *
- * \param flags Flags specifying profiler modules.
- * \param tag Profile tag.
- */
- static void ResumeProfilerEx(int flags, int tag = 0);
-
- /**
- * Pauses specified profiler modules. Each call to "PauseProfilerEx" closes
- * a block of profiler events opened by a call to "ResumeProfilerEx" with the
- * same tag value. There is no need for blocks to be properly nested.
- * The profiler is paused when the last opened block is closed.
- *
- * "PauseProfiler" is equivalent to "PauseProfilerEx(PROFILER_MODULE_CPU)".
- * See ProfilerModules enum.
- *
- * \param flags Flags specifying profiler modules.
- * \param tag Profile tag.
- */
- static void PauseProfilerEx(int flags, int tag = 0);
-
- /**
- * Returns active (resumed) profiler modules.
- * See ProfilerModules enum.
- *
- * \returns active profiler modules.
- */
- static int GetActiveProfilerModules();
-
- /**
* If logging is performed into a memory buffer (via --logfile=*), allows to
* retrieve previously written messages. This can be used for retrieving
* profiler log data in the application. This function is thread-safe.
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index 15c1a5ad7..7c30beccd 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -498,12 +498,15 @@ void ExternalArrayWeakCallback(v8::Persistent<v8::Value> object, void* data) {
v8::Handle<v8::Value> CreateExternalArray(const v8::Arguments& args,
v8::ExternalArrayType type,
size_t element_size) {
- ASSERT(element_size == 1 || element_size == 2 || element_size == 4 ||
+ assert(element_size == 1 ||
+ element_size == 2 ||
+ element_size == 4 ||
element_size == 8);
if (args.Length() != 1) {
return v8::ThrowException(
v8::String::New("Array constructor needs one parameter."));
}
+ static const int kMaxLength = 0x3fffffff;
size_t length = 0;
if (args[0]->IsUint32()) {
length = args[0]->Uint32Value();
@@ -513,7 +516,7 @@ v8::Handle<v8::Value> CreateExternalArray(const v8::Arguments& args,
return v8::ThrowException(
v8::String::New("Array length must not be negative."));
}
- if (raw_length > v8::internal::ExternalArray::kMaxLength) {
+ if (raw_length > kMaxLength) {
return v8::ThrowException(
v8::String::New("Array length exceeds maximum length."));
}
@@ -522,7 +525,7 @@ v8::Handle<v8::Value> CreateExternalArray(const v8::Arguments& args,
return v8::ThrowException(
v8::String::New("Array length must be a number."));
}
- if (length > static_cast<size_t>(v8::internal::ExternalArray::kMaxLength)) {
+ if (length > static_cast<size_t>(kMaxLength)) {
return v8::ThrowException(
v8::String::New("Array length exceeds maximum length."));
}
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index b33e14c01..d968bff57 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -4831,47 +4831,26 @@ void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
void V8::PauseProfiler() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- PauseProfilerEx(PROFILER_MODULE_CPU);
+ i::Isolate* isolate = i::Isolate::Current();
+ isolate->logger()->PauseProfiler();
#endif
}
void V8::ResumeProfiler() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- ResumeProfilerEx(PROFILER_MODULE_CPU);
+ i::Isolate* isolate = i::Isolate::Current();
+ isolate->logger()->ResumeProfiler();
#endif
}
bool V8::IsProfilerPaused() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- return LOGGER->GetActiveProfilerModules() & PROFILER_MODULE_CPU;
-#else
- return true;
-#endif
-}
-
-
-void V8::ResumeProfilerEx(int flags, int tag) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
- isolate->logger()->ResumeProfiler(flags, tag);
-#endif
-}
-
-
-void V8::PauseProfilerEx(int flags, int tag) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- LOGGER->PauseProfiler(flags, tag);
-#endif
-}
-
-
-int V8::GetActiveProfilerModules() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- return LOGGER->GetActiveProfilerModules();
+ return isolate->logger()->IsProfilerPaused();
#else
- return PROFILER_MODULE_NONE;
+ return true;
#endif
}
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index a97cf6b9c..fbf610a43 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -378,7 +378,6 @@ class Operand BASE_EMBEDDED {
INLINE(explicit Operand(int32_t immediate,
RelocInfo::Mode rmode = RelocInfo::NONE));
INLINE(explicit Operand(const ExternalReference& f));
- INLINE(explicit Operand(const char* s));
explicit Operand(Handle<Object> handle);
INLINE(explicit Operand(Smi* value));
@@ -1141,8 +1140,13 @@ class Assembler : public AssemblerBase {
void jmp(Label* L) { b(L, al); }
// Check the code size generated from label to here.
- int InstructionsGeneratedSince(Label* l) {
- return (pc_offset() - l->pos()) / kInstrSize;
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
+
+ // Check the number of instructions generated from label to here.
+ int InstructionsGeneratedSince(Label* label) {
+ return SizeOfCodeGeneratedSince(label) / kInstrSize;
}
// Check whether an immediate fits an addressing mode 1 instruction.
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index f87fd8383..328102bb4 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -1044,8 +1044,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code and pass argc as r0.
__ mov(r0, Operand(r3));
if (is_construct) {
- __ Call(masm->isolate()->builtins()->JSConstructCall(),
- RelocInfo::CODE_TARGET);
+ __ Call(masm->isolate()->builtins()->JSConstructCall());
} else {
ParameterCount actual(r0);
__ InvokeFunction(r1, actual, CALL_FUNCTION,
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 452e08cad..3c9733234 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -392,11 +392,11 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
__ mov(scratch1, Operand(r0));
ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
__ push(lr);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub1.GetCode());
// Write Smi from r1 to r1 and r0 in double format.
__ mov(scratch1, Operand(r1));
ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub2.GetCode());
__ pop(lr);
}
}
@@ -473,7 +473,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
__ mov(scratch1, Operand(object));
ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
__ push(lr);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub.GetCode());
__ pop(lr);
}
@@ -1058,7 +1058,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Convert lhs to a double in r2, r3.
__ mov(r7, Operand(lhs));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub1.GetCode());
// Load rhs to a double in r0, r1.
__ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
__ pop(lr);
@@ -1100,7 +1100,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Convert rhs to a double in r0, r1.
__ mov(r7, Operand(rhs));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub2.GetCode());
__ pop(lr);
}
// Fall through to both_loaded_as_doubles.
@@ -1731,22 +1731,14 @@ void UnaryOpStub::Generate(MacroAssembler* masm) {
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- // Prepare to push argument.
- __ mov(r3, Operand(r0));
-
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ mov(r1, Operand(Smi::FromInt(op_)));
+ __ mov(r3, Operand(r0)); // the operand
+ __ mov(r2, Operand(Smi::FromInt(op_)));
+ __ mov(r1, Operand(Smi::FromInt(mode_)));
__ mov(r0, Operand(Smi::FromInt(operand_type_)));
-
__ Push(r3, r2, r1, r0);
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
- masm->isolate()),
- 4,
- 1);
+ ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 8e3e9dc00..742735130 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -60,18 +60,11 @@ class TranscendentalCacheStub: public CodeStub {
class UnaryOpStub: public CodeStub {
public:
- UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
+ UnaryOpStub(Token::Value op,
+ UnaryOverwriteMode mode,
+ UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
- operand_type_(UnaryOpIC::UNINITIALIZED),
- name_(NULL) {
- }
-
- UnaryOpStub(
- int key,
- UnaryOpIC::TypeInfo operand_type)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
operand_type_(operand_type),
name_(NULL) {
}
@@ -89,8 +82,7 @@ class UnaryOpStub: public CodeStub {
#ifdef DEBUG
void Print() {
- PrintF("UnaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
+ PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index e0e84ab33..cd70e6de8 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -267,6 +267,9 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
+#ifdef DEBUG
+ output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@@ -382,6 +385,9 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
+#ifdef DEBUG
+ output_frame->SetKind(Code::FUNCTION);
+#endif
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
@@ -516,7 +522,7 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// Set the continuation for the topmost frame.
- if (is_topmost) {
+ if (is_topmost && bailout_type_ != DEBUGGER) {
Builtins* builtins = isolate_->builtins();
Code* continuation = (bailout_type_ == EAGER)
? builtins->builtin(Builtins::kNotifyDeoptimized)
@@ -529,8 +535,28 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
}
-#define __ masm()->
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ // Set the register values. The values are not important as there are no
+ // callee saved registers in JavaScript frames, so all registers are
+ // spilled. Registers fp and sp are set to the correct values though.
+
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ input_->SetRegister(i, i * 4);
+ }
+ input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
+ }
+}
+
+
+#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 9b771dae2..4b55915e9 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -92,17 +92,19 @@ class JumpPatchSite BASE_EMBEDDED {
}
void EmitPatchInfo() {
- int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
- Register reg;
- reg.set_code(delta_to_patch_site / kOff12Mask);
- __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
+ if (patch_site_.is_bound()) {
+ int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+ Register reg;
+ reg.set_code(delta_to_patch_site / kOff12Mask);
+ __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
#ifdef DEBUG
- info_emitted_ = true;
+ info_emitted_ = true;
#endif
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
}
- bool is_bound() const { return patch_site_.is_bound(); }
-
private:
MacroAssembler* masm_;
Label patch_site_;
@@ -129,6 +131,7 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
+ scope_ = info->scope();
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -147,13 +150,13 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
Label ok;
__ cmp(r5, Operand(0));
__ b(eq, &ok);
- int receiver_offset = scope()->num_parameters() * kPointerSize;
+ int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ str(r2, MemOperand(sp, receiver_offset));
__ bind(&ok);
}
- int locals_count = scope()->num_stack_slots();
+ int locals_count = info->scope()->num_stack_slots();
__ Push(lr, fp, cp, r1);
if (locals_count > 0) {
@@ -173,7 +176,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is in r1.
@@ -189,7 +192,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// passed to us. It's saved in the stack and kept live in cp.
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
+ int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
@@ -220,10 +223,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ mov(r3, r1);
}
// Receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
__ add(r2, fp,
Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ mov(r1, Operand(Smi::FromInt(num_parameters)));
__ Push(r3, r2, r1);
// Arguments to ArgumentsAccessStub:
@@ -345,7 +349,7 @@ void FullCodeGenerator::EmitReturnSequence() {
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
- int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
+ int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
__ RecordJSReturn();
masm_->mov(sp, fp);
@@ -786,7 +790,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ Call(ic);
// Value in r0 is ignored (declarations are statements).
}
}
@@ -860,7 +864,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- EmitCallIC(ic, &patch_site, clause->CompareId());
+ __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ patch_site.EmitPatchInfo();
__ cmp(r0, Operand(0));
__ b(ne, &next_test);
@@ -1167,7 +1172,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, mode, AstNode::kNoNumber);
+ __ Call(ic, mode);
}
@@ -1248,7 +1253,7 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
__ mov(r0, Operand(key_literal->handle()));
Handle<Code> ic =
isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ jmp(done);
}
}
@@ -1270,7 +1275,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) {
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(r0);
} else if (slot->type() == Slot::LOOKUP) {
@@ -1414,7 +1419,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1654,7 +1659,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(r2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1662,7 +1667,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1689,7 +1694,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
__ jmp(&done);
__ bind(&smi_case);
@@ -1770,7 +1776,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
OverwriteMode mode) {
__ pop(r1);
BinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), NULL, expr->id());
+ JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
context()->Plug(r0);
}
@@ -1810,7 +1818,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ Call(ic);
break;
}
case KEYED_PROPERTY: {
@@ -1823,7 +1831,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ Call(ic);
break;
}
}
@@ -1847,7 +1855,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@@ -1945,7 +1953,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1991,7 +1999,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2043,7 +2051,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
- EmitCallIC(ic, mode, expr->id());
+ __ Call(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2077,7 +2085,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
__ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2117,7 +2125,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
__ push(r1);
// Push the receiver of the enclosing function and do runtime call.
- __ ldr(r1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
+ int receiver_offset = 2 + info_->scope()->num_parameters();
+ __ ldr(r1, MemOperand(fp, receiver_offset * kPointerSize));
__ push(r1);
// Push the strict mode flag.
__ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
@@ -2260,7 +2269,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
- // for a regular property use keyed EmitCallIC.
+ // for a regular property use EmitKeyedCallWithIC.
if (prop->is_synthetic()) {
// Do not visit the object and key subexpressions (they are shared
// by all occurrences of the same rewritten parameter).
@@ -2278,7 +2287,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ ldr(r1, GlobalObjectOperand());
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ Push(r0, r1); // Function, receiver.
@@ -2669,7 +2678,7 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
// parameter count in r0.
VisitForAccumulatorValue(args->at(0));
__ mov(r1, r0);
- __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(r0);
@@ -2681,7 +2690,7 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
Label exit;
// Get the number of formal parameters.
- __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -3568,6 +3577,39 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // Load the function into r0.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Prepare for the test.
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Test for strict mode function.
+ __ ldr(r1, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ b(ne, if_true);
+
+ // Test for native function.
+ __ tst(r1, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, if_true);
+
+ // Not native or strict-mode function.
+ __ b(if_false);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -3600,7 +3642,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
isolate()->stub_cache()->ComputeCallInitialize(arg_count,
NOT_IN_LOOP,
mode);
- EmitCallIC(ic, mode, expr->id());
+ __ Call(ic, mode, expr->id());
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@@ -3742,7 +3784,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- EmitCallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(r0);
}
@@ -3853,7 +3895,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ patch_site.EmitPatchInfo();
__ bind(&done);
// Store the value returned in r0.
@@ -3884,7 +3927,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3901,7 +3944,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3927,7 +3970,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ Call(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(r0);
} else if (proxy != NULL &&
@@ -4126,7 +4169,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- EmitCallIC(ic, &patch_site, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ cmp(r0, Operand(0));
Split(cond, if_true, if_false, fall_through);
@@ -4187,70 +4231,6 @@ Register FullCodeGenerator::context_register() {
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- RelocInfo::Mode mode,
- unsigned ast_id) {
- ASSERT(mode == RelocInfo::CODE_TARGET ||
- mode == RelocInfo::CODE_TARGET_CONTEXT);
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1, r1, r2);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1, r1, r2);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1, r1, r2);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1, r1, r2);
- default:
- break;
- }
- if (ast_id == kNoASTId || mode == RelocInfo::CODE_TARGET_CONTEXT) {
- __ Call(ic, mode);
- } else {
- ASSERT(mode == RelocInfo::CODE_TARGET);
- mode = RelocInfo::CODE_TARGET_WITH_ID;
- __ CallWithAstId(ic, mode, ast_id);
- }
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- JumpPatchSite* patch_site,
- unsigned ast_id) {
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1, r1, r2);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1, r1, r2);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1, r1, r2);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1, r1, r2);
- default:
- break;
- }
-
- if (ast_id == kNoASTId) {
- __ Call(ic, RelocInfo::CODE_TARGET);
- } else {
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET_WITH_ID, ast_id);
- }
- if (patch_site != NULL && patch_site->is_bound()) {
- patch_site->EmitPatchInfo();
- } else {
- __ nop(); // Signals no inlined code.
- }
-}
-
-
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ str(value, MemOperand(fp, frame_offset));
@@ -4263,19 +4243,20 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- if (scope()->is_global_scope()) {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope()) {
// Contexts nested in the global context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
__ mov(ip, Operand(Smi::FromInt(0)));
- } else if (scope()->is_eval_scope()) {
+ } else if (declaration_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
__ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
} else {
- ASSERT(scope()->is_function_scope());
+ ASSERT(declaration_scope->is_function_scope());
__ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
__ push(ip);
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 676baeb35..dea875bad 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -952,6 +952,9 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
__ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
+ DONT_DO_SMI_CHECK);
__ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
__ cmp(key, Operand(scratch));
__ b(cs, slow_case);
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 93a1865e7..63e316921 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -265,12 +265,6 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
}
-void LTypeofIs::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
- stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
-}
-
-
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
InputAt(0)->PrintTo(stream);
@@ -340,13 +334,6 @@ void LCallNew::PrintDataTo(StringStream* stream) {
}
-void LClassOfTest::PrintDataTo(StringStream* stream) {
- stream->Add("= class_of_test(");
- InputAt(0)->PrintTo(stream);
- stream->Add(", \"%o\")", *hydrogen()->class_name());
-}
-
-
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
@@ -990,18 +977,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (current->IsTest() && !instr->IsGoto()) {
- ASSERT(instr->IsControl());
- HTest* test = HTest::cast(current);
- instr->set_hydrogen_value(test->value());
- HBasicBlock* first = test->FirstSuccessor();
- HBasicBlock* second = test->SecondSuccessor();
- ASSERT(first != NULL && second != NULL);
- instr->SetBranchTargets(first->block_id(), second->block_id());
- } else {
- instr->set_hydrogen_value(current);
- }
-
+ instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -1046,80 +1022,15 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
}
-LInstruction* LChunkBuilder::DoTest(HTest* instr) {
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* v = instr->value();
- if (!v->EmitAtUses()) return new LBranch(UseRegisterAtStart(v));
- ASSERT(!v->HasSideEffects());
- if (v->IsClassOfTest()) {
- HClassOfTest* compare = HClassOfTest::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
- TempRegister());
- } else if (v->IsCompare()) {
- HCompare* compare = HCompare::cast(v);
- HValue* left = compare->left();
- HValue* right = compare->right();
- Representation r = compare->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- } else {
- ASSERT(r.IsDouble());
- ASSERT(left->representation().IsDouble());
- ASSERT(right->representation().IsDouble());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- }
- } else if (v->IsIsSmi()) {
- HIsSmi* compare = HIsSmi::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsSmiAndBranch(Use(compare->value()));
- } else if (v->IsIsUndetectable()) {
- HIsUndetectable* compare = HIsUndetectable::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
- TempRegister());
- } else if (v->IsHasInstanceType()) {
- HHasInstanceType* compare = HHasInstanceType::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsHasCachedArrayIndex()) {
- HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsIsNull()) {
- HIsNull* compare = HIsNull::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsNullAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsIsObject()) {
- HIsObject* compare = HIsObject::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- LOperand* temp = TempRegister();
- return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()), temp);
- } else if (v->IsCompareObjectEq()) {
- HCompareObjectEq* compare = HCompareObjectEq::cast(v);
- return new LCmpObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsCompareConstantEq()) {
- HCompareConstantEq* compare = HCompareConstantEq::cast(v);
- return new LCmpConstantEqAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsTypeofIs()) {
- HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
- } else if (v->IsIsConstructCall()) {
- return new LIsConstructCallAndBranch(TempRegister());
- } else if (v->IsConstant()) {
+ if (v->EmitAtUses()) {
HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
- } else {
- Abort("Undefined compare before branch");
- return NULL;
}
+ return new LBranch(UseRegisterAtStart(v));
}
@@ -1477,85 +1388,84 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
}
-LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
Token::Value op = instr->token();
Representation r = instr->GetInputRepresentation();
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ bool reversed = (op == Token::GT || op == Token::LTE);
+ LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
+ LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
+ LCmpT* result = new LCmpT(left, right);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareIDAndBranch(
+ HCompareIDAndBranch* instr) {
+ Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else if (r.IsDouble()) {
+ return new LCmpIDAndBranch(left, right);
+ } else {
+ ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- bool reversed = (op == Token::GT || op == Token::LTE);
- LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
- LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
- LCmpT* result = new LCmpT(left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
+ return new LCmpIDAndBranch(left, right);
}
}
-LInstruction* LChunkBuilder::DoCompareObjectEq(HCompareObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LCmpObjectEq* result = new LCmpObjectEq(left, right);
- return DefineAsRegister(result);
+ return new LCmpObjectEqAndBranch(left, right);
}
-LInstruction* LChunkBuilder::DoCompareConstantEq(
- HCompareConstantEq* instr) {
- LOperand* left = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LCmpConstantEq(left));
+LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
+ HCompareConstantEqAndBranch* instr) {
+ return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
+LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsNull(value));
+ return new LIsNullAndBranch(UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsObject(value));
+ LOperand* temp = TempRegister();
+ return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()), temp);
}
-LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseAtStart(instr->value());
-
- return DefineAsRegister(new LIsSmi(value));
+ return new LIsSmiAndBranch(Use(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsUndetectable(value));
+ return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+ TempRegister());
}
-LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LHasInstanceType(value));
+ return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
}
@@ -1568,19 +1478,19 @@ LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
}
-LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
- HHasCachedArrayIndex* instr) {
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
-
- return DefineAsRegister(new LHasCachedArrayIndex(value));
+ return new LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseTempRegister(instr->value());
- return DefineSameAsFirst(new LClassOfTest(value));
+ return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+ TempRegister());
}
@@ -2169,13 +2079,14 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
}
-LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
- return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
- return DefineAsRegister(new LIsConstructCall());
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new LIsConstructCallAndBranch(TempRegister());
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index c864d20f3..ebeba8694 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -77,13 +77,9 @@ class LCodeGen;
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
- V(ClassOfTest) \
V(ClassOfTestAndBranch) \
- V(CmpConstantEq) \
V(CmpConstantEqAndBranch) \
- V(CmpID) \
V(CmpIDAndBranch) \
- V(CmpObjectEq) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
@@ -103,9 +99,7 @@ class LCodeGen;
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
- V(HasCachedArrayIndex) \
V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
@@ -113,15 +107,10 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCall) \
V(IsConstructCallAndBranch) \
- V(IsNull) \
V(IsNullAndBranch) \
- V(IsObject) \
V(IsObjectAndBranch) \
- V(IsSmi) \
V(IsSmiAndBranch) \
- V(IsUndetectable) \
V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
@@ -173,7 +162,6 @@ class LCodeGen;
V(Throw) \
V(ToFastProperties) \
V(Typeof) \
- V(TypeofIs) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
@@ -232,7 +220,6 @@ class LInstruction: public ZoneObject {
virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
- virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
void set_environment(LEnvironment* env) { environment_ = env; }
LEnvironment* environment() const { return environment_; }
@@ -456,16 +443,15 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
virtual bool IsControl() const { return true; }
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
- void SetBranchTargets(int true_block_id, int false_block_id) {
- true_block_id_ = true_block_id;
- false_block_id_ = false_block_id;
- }
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+ int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
+ int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
private:
- int true_block_id_;
- int false_block_id_;
+ HControlInstruction* hydrogen() {
+ return HControlInstruction::cast(this->hydrogen_value());
+ }
};
@@ -581,23 +567,6 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
};
-class LCmpID: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpID(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
- }
-};
-
-
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -606,7 +575,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
@@ -632,17 +601,6 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
};
-class LCmpObjectEq: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpObjectEq(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEq, "cmp-object-eq")
-};
-
-
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -652,17 +610,7 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
"cmp-object-eq-and-branch")
-};
-
-
-class LCmpConstantEq: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCmpConstantEq(LOperand* left) {
- inputs_[0] = left;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEq, "cmp-constant-eq")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
+ DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
};
@@ -674,22 +622,10 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
"cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
};
-class LIsNull: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsNull(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
-
- bool is_strict() const { return hydrogen()->is_strict(); }
-};
-
class LIsNullAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsNullAndBranch(LOperand* value) {
@@ -697,7 +633,7 @@ class LIsNullAndBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
+ DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
bool is_strict() const { return hydrogen()->is_strict(); }
@@ -705,16 +641,6 @@ class LIsNullAndBranch: public LControlInstruction<1, 0> {
};
-class LIsObject: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsObject(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
-};
-
-
class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
@@ -723,22 +649,12 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LIsSmi: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
- DECLARE_HYDROGEN_ACCESSOR(IsSmi)
-};
-
-
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
@@ -746,22 +662,12 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsUndetectable(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
-};
-
-
class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
@@ -771,22 +677,12 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
-};
-
-
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
@@ -795,7 +691,7 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -812,17 +708,6 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
-};
-
-
class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -831,18 +716,7 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTest: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClassOfTest(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+ DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -857,7 +731,7 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -871,7 +745,7 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1000,7 +874,7 @@ class LBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Value)
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -1979,21 +1853,6 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeofIs(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
@@ -2001,7 +1860,7 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
@@ -2009,13 +1868,6 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
};
-class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
- DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
-};
-
-
class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index e23bad268..24e2044f2 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -1531,7 +1531,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Representation r = instr->hydrogen()->representation();
+ Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
Register reg = ToRegister(instr->InputAt(0));
__ cmp(reg, Operand(0));
@@ -1547,7 +1547,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->InputAt(0));
- if (instr->hydrogen()->type().IsBoolean()) {
+ if (instr->hydrogen()->value()->type().IsBoolean()) {
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(reg, ip);
EmitBranch(true_block, false_block, eq);
@@ -1645,34 +1645,6 @@ void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
}
-void LCodeGen::DoCmpID(LCmpID* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- LOperand* result = instr->result();
- Register scratch = scratch0();
-
- Label unordered, done;
- if (instr->is_double()) {
- // Compare left and right as doubles and load the
- // resulting flags into the normal status register.
- __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
- // If a NaN is involved, i.e. the result is unordered (V set),
- // jump to unordered to return false.
- __ b(vs, &unordered);
- } else {
- EmitCmpI(left, right);
- }
-
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
- __ b(cc, &done);
-
- __ bind(&unordered);
- __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
@@ -1695,17 +1667,6 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
}
-void LCodeGen::DoCmpObjectEq(LCmpObjectEq* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- __ cmp(left, Operand(right));
- __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
-}
-
-
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
@@ -1717,17 +1678,6 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
-void LCodeGen::DoCmpConstantEq(LCmpConstantEq* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- Label done;
- __ cmp(left, Operand(instr->hydrogen()->right()));
- __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
-}
-
-
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1738,39 +1688,6 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
-void LCodeGen::DoIsNull(LIsNull* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(reg, ip);
- if (instr->is_strict()) {
- __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
- } else {
- Label true_value, false_value, done;
- __ b(eq, &true_value);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(ip, reg);
- __ b(eq, &true_value);
- __ JumpIfSmi(reg, &false_value);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = result;
- __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- __ b(ne, &true_value);
- __ bind(&false_value);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done);
- __ bind(&true_value);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ bind(&done);
- }
-}
-
-
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register scratch = scratch0();
Register reg = ToRegister(instr->InputAt(0));
@@ -1830,25 +1747,6 @@ Condition LCodeGen::EmitIsObject(Register input,
}
-void LCodeGen::DoIsObject(LIsObject* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Label is_false, is_true, done;
-
- Condition true_cond = EmitIsObject(reg, result, &is_false, &is_true);
- __ b(true_cond, &is_true);
-
- __ bind(&is_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ b(&done);
-
- __ bind(&is_true);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register temp1 = ToRegister(instr->TempAt(0));
@@ -1866,18 +1764,6 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
}
-void LCodeGen::DoIsSmi(LIsSmi* instr) {
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Register result = ToRegister(instr->result());
- Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
- Label done;
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ JumpIfSmi(input_reg, &done);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1888,25 +1774,6 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
}
-void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Label false_label, done;
- __ JumpIfSmi(input, &false_label);
- __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kBitFieldOffset));
- __ tst(result, Operand(1 << Map::kIsUndetectable));
- __ b(eq, &false_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(&false_label);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -1922,7 +1789,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
}
-static InstanceType TestType(HHasInstanceType* instr) {
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
@@ -1931,7 +1798,7 @@ static InstanceType TestType(HHasInstanceType* instr) {
}
-static Condition BranchCondition(HHasInstanceType* instr) {
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == to) return eq;
@@ -1942,23 +1809,6 @@ static Condition BranchCondition(HHasInstanceType* instr) {
}
-void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Label done;
- __ tst(input, Operand(kSmiTagMask));
- __ LoadRoot(result, Heap::kFalseValueRootIndex, eq);
- __ b(eq, &done);
- __ CompareObjectType(input, result, result, TestType(instr->hydrogen()));
- Condition cond = BranchCondition(instr->hydrogen());
- __ LoadRoot(result, Heap::kTrueValueRootIndex, cond);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, NegateCondition(cond));
- __ bind(&done);
-}
-
-
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register scratch = scratch0();
Register input = ToRegister(instr->InputAt(0));
@@ -1988,20 +1838,6 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
}
-void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ ldr(scratch,
- FieldMemOperand(input, String::kHashFieldOffset));
- __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
- __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
-}
-
-
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
@@ -2074,27 +1910,6 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
}
-void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- ASSERT(input.is(result));
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- Label done, is_true, is_false;
-
- EmitClassOfTest(&is_true, &is_false, class_name, input, scratch0(), input);
- __ b(ne, &is_false);
-
- __ bind(&is_true);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
-
- __ bind(&is_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = scratch0();
@@ -4349,29 +4164,6 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
}
-void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Label true_label;
- Label false_label;
- Label done;
-
- Condition final_branch_condition = EmitTypeofIs(&true_label,
- &false_label,
- input,
- instr->type_literal());
- __ b(final_branch_condition, &true_label);
- __ bind(&false_label);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ b(&done);
-
- __ bind(&true_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -4455,26 +4247,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
}
-void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
- Register result = ToRegister(instr->result());
- Label true_label;
- Label false_label;
- Label done;
-
- EmitIsConstructCall(result, scratch0());
- __ b(eq, &true_label);
-
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ b(&done);
-
-
- __ bind(&true_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp1 = ToRegister(instr->TempAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 49282b871..08a1cb945 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -91,7 +91,7 @@ void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
}
-void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
Condition cond) {
ASSERT(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
@@ -118,10 +118,8 @@ int MacroAssembler::CallSize(Register target, Condition cond) {
void MacroAssembler::Call(Register target, Condition cond) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
+ Label start;
+ bind(&start);
#if USE_BLX
blx(target, cond);
#else
@@ -129,34 +127,29 @@ void MacroAssembler::Call(Register target, Condition cond) {
mov(lr, Operand(pc), LeaveCC, cond);
mov(pc, Operand(target), LeaveCC, cond);
#endif
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(target, cond), post_position);
-#endif
+ ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}
int MacroAssembler::CallSize(
- intptr_t target, RelocInfo::Mode rmode, Condition cond) {
+ Address target, RelocInfo::Mode rmode, Condition cond) {
int size = 2 * kInstrSize;
Instr mov_instr = cond | MOV | LeaveCC;
- if (!Operand(target, rmode).is_single_instruction(mov_instr)) {
+ intptr_t immediate = reinterpret_cast<intptr_t>(target);
+ if (!Operand(immediate, rmode).is_single_instruction(mov_instr)) {
size += kInstrSize;
}
return size;
}
-void MacroAssembler::Call(intptr_t target,
+void MacroAssembler::Call(Address target,
RelocInfo::Mode rmode,
Condition cond) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
+ Label start;
+ bind(&start);
#if USE_BLX
// On ARMv5 and after the recommended call sequence is:
// ldr ip, [pc, #...]
@@ -168,7 +161,7 @@ void MacroAssembler::Call(intptr_t target,
// we have to do it explicitly.
positions_recorder()->WriteRecordedPositions();
- mov(ip, Operand(target, rmode));
+ mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
@@ -176,82 +169,36 @@ void MacroAssembler::Call(intptr_t target,
// Set lr for return at current pc + 8.
mov(lr, Operand(pc), LeaveCC, cond);
// Emit a ldr<cond> pc, [pc + offset of target in constant pool].
- mov(pc, Operand(target, rmode), LeaveCC, cond);
+ mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
-#endif
+ ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
}
-int MacroAssembler::CallSize(
- byte* target, RelocInfo::Mode rmode, Condition cond) {
- return CallSize(reinterpret_cast<intptr_t>(target), rmode);
-}
-
-
-void MacroAssembler::Call(
- byte* target, RelocInfo::Mode rmode, Condition cond) {
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- Call(reinterpret_cast<intptr_t>(target), rmode, cond);
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
-#endif
-}
-
-
-int MacroAssembler::CallSize(
- Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
- return CallSize(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
-}
-
-
-void MacroAssembler::CallWithAstId(Handle<Code> code,
- RelocInfo::Mode rmode,
- unsigned ast_id,
- Condition cond) {
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
- ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
- ASSERT(ast_id != kNoASTId);
- ASSERT(ast_id_for_reloc_info_ == kNoASTId);
- ast_id_for_reloc_info_ = ast_id;
- // 'code' is always generated ARM code, never THUMB code
- Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
-#endif
+int MacroAssembler::CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id,
+ Condition cond) {
+ return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}
void MacroAssembler::Call(Handle<Code> code,
RelocInfo::Mode rmode,
+ unsigned ast_id,
Condition cond) {
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
+ Label start;
+ bind(&start);
ASSERT(RelocInfo::IsCodeTarget(rmode));
+ if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+ ASSERT(ast_id_for_reloc_info_ == kNoASTId);
+ ast_id_for_reloc_info_ = ast_id;
+ rmode = RelocInfo::CODE_TARGET_WITH_ID;
+ }
// 'code' is always generated ARM code, never THUMB code
- Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
-#endif
+ Call(reinterpret_cast<Address>(code.location()), rmode, cond);
+ ASSERT_EQ(CallSize(code, rmode, cond), SizeOfCodeGeneratedSince(&start));
}
@@ -994,9 +941,9 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+ call_wrapper.BeforeCall(CallSize(adaptor));
SetCallKind(r5, call_kind);
- Call(adaptor, RelocInfo::CODE_TARGET);
+ Call(adaptor);
call_wrapper.AfterCall();
b(done);
} else {
@@ -1719,7 +1666,7 @@ void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
- ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail);
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index c601f26b7..1918858eb 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -90,19 +90,19 @@ class MacroAssembler: public Assembler {
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
- void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
+ void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al);
- int CallSize(byte* target, RelocInfo::Mode rmode, Condition cond = al);
- void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
- int CallSize(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+ int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ unsigned ast_id = kNoASTId,
+ Condition cond = al);
void Call(Handle<Code> code,
- RelocInfo::Mode rmode,
- Condition cond = al);
- void CallWithAstId(Handle<Code> code,
- RelocInfo::Mode rmode,
- unsigned ast_id,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ unsigned ast_id = kNoASTId,
Condition cond = al);
void Ret(Condition cond = al);
@@ -1036,10 +1036,6 @@ class MacroAssembler: public Assembler {
int num_double_arguments);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
- int CallSize(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
- void Call(intptr_t target,
- RelocInfo::Mode rmode,
- Condition cond = al);
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index 34ebd4e22..60cf3f0c5 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -742,14 +742,15 @@ function ArraySort(comparefn) {
else return x < y ? -1 : 1;
};
}
- var global_receiver = %GetGlobalReceiver();
+ var receiver =
+ %_IsNativeOrStrictMode(comparefn) ? void 0 : %GetGlobalReceiver();
function InsertionSort(a, from, to) {
for (var i = from + 1; i < to; i++) {
var element = a[i];
for (var j = i - 1; j >= from; j--) {
var tmp = a[j];
- var order = %_CallFunction(global_receiver, tmp, element, comparefn);
+ var order = %_CallFunction(receiver, tmp, element, comparefn);
if (order > 0) {
a[j + 1] = tmp;
} else {
@@ -771,14 +772,14 @@ function ArraySort(comparefn) {
var v1 = a[to - 1];
var middle_index = from + ((to - from) >> 1);
var v2 = a[middle_index];
- var c01 = %_CallFunction(global_receiver, v0, v1, comparefn);
+ var c01 = %_CallFunction(receiver, v0, v1, comparefn);
if (c01 > 0) {
// v1 < v0, so swap them.
var tmp = v0;
v0 = v1;
v1 = tmp;
} // v0 <= v1.
- var c02 = %_CallFunction(global_receiver, v0, v2, comparefn);
+ var c02 = %_CallFunction(receiver, v0, v2, comparefn);
if (c02 >= 0) {
// v2 <= v0 <= v1.
var tmp = v0;
@@ -787,7 +788,7 @@ function ArraySort(comparefn) {
v1 = tmp;
} else {
// v0 <= v1 && v0 < v2
- var c12 = %_CallFunction(global_receiver, v1, v2, comparefn);
+ var c12 = %_CallFunction(receiver, v1, v2, comparefn);
if (c12 > 0) {
// v0 <= v2 < v1
var tmp = v1;
@@ -808,7 +809,7 @@ function ArraySort(comparefn) {
// From i to high_start are elements that haven't been compared yet.
partition: for (var i = low_end + 1; i < high_start; i++) {
var element = a[i];
- var order = %_CallFunction(global_receiver, element, pivot, comparefn);
+ var order = %_CallFunction(receiver, element, pivot, comparefn);
if (order < 0) {
%_SwapElements(a, i, low_end);
low_end++;
@@ -817,7 +818,7 @@ function ArraySort(comparefn) {
high_start--;
if (high_start == i) break partition;
var top_elem = a[high_start];
- order = %_CallFunction(global_receiver, top_elem, pivot, comparefn);
+ order = %_CallFunction(receiver, top_elem, pivot, comparefn);
} while (order > 0);
%_SwapElements(a, i, high_start);
if (order < 0) {
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 045404a49..805a40653 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -772,20 +772,26 @@ class TryStatement: public Statement {
class TryCatchStatement: public TryStatement {
public:
- TryCatchStatement(Block* try_block, Handle<String> name, Block* catch_block)
+ TryCatchStatement(Block* try_block,
+ Scope* scope,
+ Variable* variable,
+ Block* catch_block)
: TryStatement(try_block),
- name_(name),
+ scope_(scope),
+ variable_(variable),
catch_block_(catch_block) {
}
DECLARE_NODE_TYPE(TryCatchStatement)
+ Scope* scope() { return scope_; }
+ Variable* variable() { return variable_; }
Block* catch_block() const { return catch_block_; }
- Handle<String> name() const { return name_; }
virtual bool IsInlineable() const;
private:
- Handle<String> name_;
+ Scope* scope_;
+ Variable* variable_;
Block* catch_block_;
};
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 5bd8bf31c..28e833a49 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -52,8 +52,7 @@ CompilationCache::CompilationCache(Isolate* isolate)
eval_global_(isolate, kEvalGlobalGenerations),
eval_contextual_(isolate, kEvalContextualGenerations),
reg_exp_(isolate, kRegExpGenerations),
- enabled_(true),
- eager_optimizing_set_(NULL) {
+ enabled_(true) {
CompilationSubCache* subcaches[kSubCacheCount] =
{&script_, &eval_global_, &eval_contextual_, &reg_exp_};
for (int i = 0; i < kSubCacheCount; ++i) {
@@ -62,10 +61,7 @@ CompilationCache::CompilationCache(Isolate* isolate)
}
-CompilationCache::~CompilationCache() {
- delete eager_optimizing_set_;
- eager_optimizing_set_ = NULL;
-}
+CompilationCache::~CompilationCache() {}
static Handle<CompilationCacheTable> AllocateTable(Isolate* isolate, int size) {
@@ -457,47 +453,6 @@ void CompilationCache::PutRegExp(Handle<String> source,
}
-static bool SourceHashCompare(void* key1, void* key2) {
- return key1 == key2;
-}
-
-
-HashMap* CompilationCache::EagerOptimizingSet() {
- if (eager_optimizing_set_ == NULL) {
- eager_optimizing_set_ = new HashMap(&SourceHashCompare);
- }
- return eager_optimizing_set_;
-}
-
-
-bool CompilationCache::ShouldOptimizeEagerly(Handle<JSFunction> function) {
- if (FLAG_opt_eagerly) return true;
- uint32_t hash = function->SourceHash();
- void* key = reinterpret_cast<void*>(hash);
- return EagerOptimizingSet()->Lookup(key, hash, false) != NULL;
-}
-
-
-void CompilationCache::MarkForEagerOptimizing(Handle<JSFunction> function) {
- uint32_t hash = function->SourceHash();
- void* key = reinterpret_cast<void*>(hash);
- EagerOptimizingSet()->Lookup(key, hash, true);
-}
-
-
-void CompilationCache::MarkForLazyOptimizing(Handle<JSFunction> function) {
- uint32_t hash = function->SourceHash();
- void* key = reinterpret_cast<void*>(hash);
- EagerOptimizingSet()->Remove(key, hash);
-}
-
-
-void CompilationCache::ResetEagerOptimizingData() {
- HashMap* set = EagerOptimizingSet();
- if (set->occupancy() > 0) set->Clear();
-}
-
-
void CompilationCache::Clear() {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches_[i]->Clear();
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 887d4e84e..1fcf75313 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -223,14 +223,6 @@ class CompilationCache {
JSRegExp::Flags flags,
Handle<FixedArray> data);
- // Support for eager optimization tracking.
- bool ShouldOptimizeEagerly(Handle<JSFunction> function);
- void MarkForEagerOptimizing(Handle<JSFunction> function);
- void MarkForLazyOptimizing(Handle<JSFunction> function);
-
- // Reset the eager optimization tracking data.
- void ResetEagerOptimizingData();
-
// Clear the cache - also used to initialize the cache at startup.
void Clear();
@@ -274,8 +266,6 @@ class CompilationCache {
// Current enable state of the compilation cache.
bool enabled_;
- HashMap* eager_optimizing_set_;
-
friend class Isolate;
DISALLOW_COPY_AND_ASSIGN(CompilationCache);
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index f8d1b3de6..e09b72f7d 100755
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -109,8 +109,6 @@ void CompilationInfo::DisableOptimization() {
void CompilationInfo::AbortOptimization() {
Handle<Code> code(shared_info()->code());
SetCode(code);
- Isolate* isolate = code->GetIsolate();
- isolate->compilation_cache()->MarkForLazyOptimizing(closure());
}
@@ -413,7 +411,8 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
String::cast(script->name())));
GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
script,
- info->code()));
+ info->code(),
+ info));
} else {
PROFILE(isolate, CodeCreateEvent(
info->is_eval()
@@ -422,7 +421,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
*info->code(),
*result,
isolate->heap()->empty_string()));
- GDBJIT(AddCode(Handle<String>(), script, info->code()));
+ GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
}
// Hint to the runtime system used when allocating space for initial
@@ -618,6 +617,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
if (info->IsOptimizing()) {
+ ASSERT(shared->scope_info() != SerializedScopeInfo::Empty());
function->ReplaceCode(*code);
} else {
// Update the shared function info with the compiled code and the
@@ -659,9 +659,6 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
CompilationInfo optimized(function);
optimized.SetOptimizing(AstNode::kNoNumber);
return CompileLazy(&optimized);
- } else if (isolate->compilation_cache()->ShouldOptimizeEagerly(
- function)) {
- isolate->runtime_profiler()->OptimizeSoon(*function);
}
}
}
@@ -788,7 +785,8 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
GDBJIT(AddCode(Handle<String>(shared->DebugName()),
Handle<Script>(info->script()),
- Handle<Code>(info->code())));
+ Handle<Code>(info->code()),
+ info));
}
} } // namespace v8::internal
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index da6e08875..0f3d44cc2 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -225,7 +225,6 @@ class Context: public FixedArray {
OPAQUE_REFERENCE_FUNCTION_INDEX,
CONTEXT_EXTENSION_FUNCTION_INDEX,
OUT_OF_MEMORY_INDEX,
- MAP_CACHE_INDEX,
CONTEXT_DATA_INDEX,
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
DERIVED_GET_TRAP_INDEX,
@@ -234,6 +233,7 @@ class Context: public FixedArray {
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
OPTIMIZED_FUNCTIONS_LIST, // Weak.
+ MAP_CACHE_INDEX, // Weak.
NEXT_CONTEXT_LINK, // Weak.
// Total number of slots.
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index 5a2e9a234..79b846d4a 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -981,11 +981,22 @@ function PadInt(n, digits) {
function DateToISOString() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
- return this.getUTCFullYear() +
+ var year = this.getUTCFullYear();
+ var year_string;
+ if (year >= 0 && year <= 9999) {
+ year_string = PadInt(year, 4);
+ } else {
+ if (year < 0) {
+ year_string = "-" + PadInt(-year, 6);
+ } else {
+ year_string = "+" + PadInt(year, 6);
+ }
+ }
+ return year_string +
'-' + PadInt(this.getUTCMonth() + 1, 2) +
- '-' + PadInt(this.getUTCDate(), 2) +
+ '-' + PadInt(this.getUTCDate(), 2) +
'T' + PadInt(this.getUTCHours(), 2) +
- ':' + PadInt(this.getUTCMinutes(), 2) +
+ ':' + PadInt(this.getUTCMinutes(), 2) +
':' + PadInt(this.getUTCSeconds(), 2) +
'.' + PadInt(this.getUTCMilliseconds(), 3) +
'Z';
@@ -995,8 +1006,8 @@ function DateToISOString() {
function DateToJSON(key) {
var o = ToObject(this);
var tv = DefaultNumber(o);
- if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
- return null;
+ if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
+ return null;
}
return o.toISOString();
}
diff --git a/deps/v8/src/dateparser-inl.h b/deps/v8/src/dateparser-inl.h
index 7f8fac83e..32f0f9ea8 100644
--- a/deps/v8/src/dateparser-inl.h
+++ b/deps/v8/src/dateparser-inl.h
@@ -39,16 +39,71 @@ bool DateParser::Parse(Vector<Char> str,
UnicodeCache* unicode_cache) {
ASSERT(out->length() >= OUTPUT_SIZE);
InputReader<Char> in(unicode_cache, str);
+ DateStringTokenizer<Char> scanner(&in);
TimeZoneComposer tz;
TimeComposer time;
DayComposer day;
- while (!in.IsEnd()) {
- if (in.IsAsciiDigit()) {
- // Parse a number (possibly with 1 or 2 trailing colons).
- int n = in.ReadUnsignedNumber();
- if (in.Skip(':')) {
- if (in.Skip(':')) {
+ // Specification:
+ // Accept ES5 ISO 8601 date-time-strings or legacy dates compatible
+ // with Safari.
+ // ES5 ISO 8601 dates:
+ // [('-'|'+')yy]yyyy[-MM[-DD]][THH:mm[:ss[.sss]][Z|(+|-)hh:mm]]
+ // where yyyy is in the range 0000..9999 and
+ // +/-yyyyyy is in the range -999999..+999999 -
+ // but -000000 is invalid (year zero must be positive),
+ // MM is in the range 01..12,
+ // DD is in the range 01..31,
+  //   MM and DD default to 01 if missing,
+ // HH is generally in the range 00..23, but can be 24 if mm, ss
+ // and sss are zero (or missing), representing midnight at the
+ // end of a day,
+ // mm and ss are in the range 00..59,
+ // sss is in the range 000..999,
+ // hh is in the range 00..23,
+ // mm, ss, and sss default to 00 if missing, and
+ // timezone defaults to Z if missing.
+ // Extensions:
+ // We also allow sss to have more or less than three digits (but at
+ // least one).
+ // We allow hh:mm to be specified as hhmm.
+ // Legacy dates:
+ // Any unrecognized word before the first number is ignored.
+ // Parenthesized text is ignored.
+ // An unsigned number followed by ':' is a time value, and is
+ // added to the TimeComposer. A number followed by '::' adds a second
+ // zero as well. A number followed by '.' is also a time and must be
+ // followed by milliseconds.
+ // Any other number is a date component and is added to DayComposer.
+ // A month name (or really: any word having the same first three letters
+ // as a month name) is recorded as a named month in the Day composer.
+ // A word recognizable as a time-zone is recorded as such, as is
+ // '(+|-)(hhmm|hh:)'.
+  //  Legacy dates don't allow extra signs ('+' or '-') or unmatched ')'
+ // after a number has been read (before the first number, any garbage
+ // is allowed).
+ // Intersection of the two:
+ // A string that matches both formats (e.g. 1970-01-01) will be
+ // parsed as an ES5 date-time string - which means it will default
+ // to UTC time-zone. That's unavoidable if following the ES5
+ // specification.
+ // After a valid "T" has been read while scanning an ES5 datetime string,
+ // the input can no longer be a valid legacy date, since the "T" is a
+ // garbage string after a number has been read.
+
+  // First try getting as far as possible with an ES5 Date Time String.
+ DateToken next_unhandled_token = ParseES5DateTime(&scanner, &day, &time, &tz);
+ if (next_unhandled_token.IsInvalid()) return false;
+ bool has_read_number = !day.IsEmpty();
+ // If there's anything left, continue with the legacy parser.
+ for (DateToken token = next_unhandled_token;
+ !token.IsEndOfInput();
+ token = scanner.Next()) {
+ if (token.IsNumber()) {
+ has_read_number = true;
+ int n = token.number();
+ if (scanner.SkipSymbol(':')) {
+ if (scanner.SkipSymbol(':')) {
// n + "::"
if (!time.IsEmpty()) return false;
time.Add(n);
@@ -56,12 +111,13 @@ bool DateParser::Parse(Vector<Char> str,
} else {
// n + ":"
if (!time.Add(n)) return false;
- in.Skip('.');
+ if (scanner.Peek().IsSymbol('.')) scanner.Next();
}
- } else if (in.Skip('.') && time.IsExpecting(n)) {
+ } else if (scanner.SkipSymbol('.') && time.IsExpecting(n)) {
time.Add(n);
- if (!in.IsAsciiDigit()) return false;
- int n = in.ReadMilliseconds();
+ if (!scanner.Peek().IsNumber()) return false;
+ int n = ReadMilliseconds(scanner.Next());
+ if (n < 0) return false;
time.AddFinal(n);
} else if (tz.IsExpecting(n)) {
tz.SetAbsoluteMinute(n);
@@ -69,59 +125,206 @@ bool DateParser::Parse(Vector<Char> str,
time.AddFinal(n);
// Require end, white space, "Z", "+" or "-" immediately after
// finalizing time.
- if (!in.IsEnd() && !in.SkipWhiteSpace() && !in.Is('Z') &&
- !in.IsAsciiSign()) return false;
+ DateToken peek = scanner.Peek();
+ if (!peek.IsEndOfInput() &&
+ !peek.IsWhiteSpace() &&
+ !peek.IsKeywordZ() &&
+ !peek.IsAsciiSign()) return false;
} else {
if (!day.Add(n)) return false;
- in.Skip('-'); // Ignore suffix '-' for year, month, or day.
- // Skip trailing 'T' for ECMAScript 5 date string format but make
- // sure that it is followed by a digit (for the time).
- if (in.Skip('T') && !in.IsAsciiDigit()) return false;
+ scanner.SkipSymbol('-');
}
- } else if (in.IsAsciiAlphaOrAbove()) {
+ } else if (token.IsKeyword()) {
// Parse a "word" (sequence of chars. >= 'A').
- uint32_t pre[KeywordTable::kPrefixLength];
- int len = in.ReadWord(pre, KeywordTable::kPrefixLength);
- int index = KeywordTable::Lookup(pre, len);
- KeywordType type = KeywordTable::GetType(index);
-
+ KeywordType type = token.keyword_type();
+ int value = token.keyword_value();
if (type == AM_PM && !time.IsEmpty()) {
- time.SetHourOffset(KeywordTable::GetValue(index));
+ time.SetHourOffset(value);
} else if (type == MONTH_NAME) {
- day.SetNamedMonth(KeywordTable::GetValue(index));
- in.Skip('-'); // Ignore suffix '-' for month names
- } else if (type == TIME_ZONE_NAME && in.HasReadNumber()) {
- tz.Set(KeywordTable::GetValue(index));
+ day.SetNamedMonth(value);
+ scanner.SkipSymbol('-');
+ } else if (type == TIME_ZONE_NAME && has_read_number) {
+ tz.Set(value);
} else {
// Garbage words are illegal if a number has been read.
- if (in.HasReadNumber()) return false;
+ if (has_read_number) return false;
}
- } else if (in.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
+ } else if (token.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
// Parse UTC offset (only after UTC or time).
- tz.SetSign(in.GetAsciiSignValue());
- in.Next();
- int n = in.ReadUnsignedNumber();
- if (in.Skip(':')) {
+ tz.SetSign(token.ascii_sign());
+ // The following number may be empty.
+ int n = 0;
+ if (scanner.Peek().IsNumber()) {
+ n = scanner.Next().number();
+ }
+ has_read_number = true;
+
+ if (scanner.Peek().IsSymbol(':')) {
tz.SetAbsoluteHour(n);
tz.SetAbsoluteMinute(kNone);
} else {
tz.SetAbsoluteHour(n / 100);
tz.SetAbsoluteMinute(n % 100);
}
- } else if (in.Is('(')) {
- // Ignore anything from '(' to a matching ')' or end of string.
- in.SkipParentheses();
- } else if ((in.IsAsciiSign() || in.Is(')')) && in.HasReadNumber()) {
+ } else if ((token.IsAsciiSign() || token.IsSymbol(')')) &&
+ has_read_number) {
// Extra sign or ')' is illegal if a number has been read.
return false;
} else {
- // Ignore other characters.
- in.Next();
+ // Ignore other characters and whitespace.
}
}
+
return day.Write(out) && time.Write(out) && tz.Write(out);
}
+
+template<typename CharType>
+DateParser::DateToken DateParser::DateStringTokenizer<CharType>::Scan() {
+ int pre_pos = in_->position();
+ if (in_->IsEnd()) return DateToken::EndOfInput();
+ if (in_->IsAsciiDigit()) {
+ int n = in_->ReadUnsignedNumeral();
+ int length = in_->position() - pre_pos;
+ return DateToken::Number(n, length);
+ }
+ if (in_->Skip(':')) return DateToken::Symbol(':');
+ if (in_->Skip('-')) return DateToken::Symbol('-');
+ if (in_->Skip('+')) return DateToken::Symbol('+');
+ if (in_->Skip('.')) return DateToken::Symbol('.');
+ if (in_->Skip(')')) return DateToken::Symbol(')');
+ if (in_->IsAsciiAlphaOrAbove()) {
+ ASSERT(KeywordTable::kPrefixLength == 3);
+ uint32_t buffer[3] = {0, 0, 0};
+ int length = in_->ReadWord(buffer, 3);
+ int index = KeywordTable::Lookup(buffer, length);
+ return DateToken::Keyword(KeywordTable::GetType(index),
+ KeywordTable::GetValue(index),
+ length);
+ }
+ if (in_->SkipWhiteSpace()) {
+ return DateToken::WhiteSpace(in_->position() - pre_pos);
+ }
+ if (in_->SkipParentheses()) {
+ return DateToken::Unknown();
+ }
+ in_->Next();
+ return DateToken::Unknown();
+}
+
+
+template <typename Char>
+DateParser::DateToken DateParser::ParseES5DateTime(
+ DateStringTokenizer<Char>* scanner,
+ DayComposer* day,
+ TimeComposer* time,
+ TimeZoneComposer* tz) {
+ ASSERT(day->IsEmpty());
+ ASSERT(time->IsEmpty());
+ ASSERT(tz->IsEmpty());
+
+  // Parse mandatory date string: [('-'|'+')yy]yyyy['-'MM['-'DD]]
+ if (scanner->Peek().IsAsciiSign()) {
+ // Keep the sign token, so we can pass it back to the legacy
+ // parser if we don't use it.
+ DateToken sign_token = scanner->Next();
+ if (!scanner->Peek().IsFixedLengthNumber(6)) return sign_token;
+ int sign = sign_token.ascii_sign();
+ int year = scanner->Next().number();
+ if (sign < 0 && year == 0) return sign_token;
+ day->Add(sign * year);
+ } else if (scanner->Peek().IsFixedLengthNumber(4)) {
+ day->Add(scanner->Next().number());
+ } else {
+ return scanner->Next();
+ }
+ if (scanner->SkipSymbol('-')) {
+ if (!scanner->Peek().IsFixedLengthNumber(2) ||
+ !DayComposer::IsMonth(scanner->Peek().number())) return scanner->Next();
+ day->Add(scanner->Next().number());
+ if (scanner->SkipSymbol('-')) {
+ if (!scanner->Peek().IsFixedLengthNumber(2) ||
+ !DayComposer::IsDay(scanner->Peek().number())) return scanner->Next();
+ day->Add(scanner->Next().number());
+ }
+ }
+ // Check for optional time string: 'T'HH':'mm[':'ss['.'sss]]Z
+ if (!scanner->Peek().IsKeywordType(TIME_SEPARATOR)) {
+ if (!scanner->Peek().IsEndOfInput()) return scanner->Next();
+ } else {
+ // ES5 Date Time String time part is present.
+ scanner->Next();
+ if (!scanner->Peek().IsFixedLengthNumber(2) ||
+ !Between(scanner->Peek().number(), 0, 24)) {
+ return DateToken::Invalid();
+ }
+ // Allow 24:00[:00[.000]], but no other time starting with 24.
+ bool hour_is_24 = (scanner->Peek().number() == 24);
+ time->Add(scanner->Next().number());
+ if (!scanner->SkipSymbol(':')) return DateToken::Invalid();
+ if (!scanner->Peek().IsFixedLengthNumber(2) ||
+ !TimeComposer::IsMinute(scanner->Peek().number()) ||
+ (hour_is_24 && scanner->Peek().number() > 0)) {
+ return DateToken::Invalid();
+ }
+ time->Add(scanner->Next().number());
+ if (scanner->SkipSymbol(':')) {
+ if (!scanner->Peek().IsFixedLengthNumber(2) ||
+ !TimeComposer::IsSecond(scanner->Peek().number()) ||
+ (hour_is_24 && scanner->Peek().number() > 0)) {
+ return DateToken::Invalid();
+ }
+ time->Add(scanner->Next().number());
+ if (scanner->SkipSymbol('.')) {
+ if (!scanner->Peek().IsNumber() ||
+ (hour_is_24 && scanner->Peek().number() > 0)) {
+ return DateToken::Invalid();
+ }
+ // Allow more or less than the mandated three digits.
+ time->Add(ReadMilliseconds(scanner->Next()));
+ }
+ }
+ // Check for optional timezone designation: 'Z' | ('+'|'-')hh':'mm
+ if (scanner->Peek().IsKeywordZ()) {
+ scanner->Next();
+ tz->Set(0);
+ } else if (scanner->Peek().IsSymbol('+') ||
+ scanner->Peek().IsSymbol('-')) {
+ tz->SetSign(scanner->Next().symbol() == '+' ? 1 : -1);
+ if (scanner->Peek().IsFixedLengthNumber(4)) {
+ // hhmm extension syntax.
+ int hourmin = scanner->Next().number();
+ int hour = hourmin / 100;
+ int min = hourmin % 100;
+ if (!TimeComposer::IsHour(hour) || !TimeComposer::IsMinute(min)) {
+ return DateToken::Invalid();
+ }
+ tz->SetAbsoluteHour(hour);
+ tz->SetAbsoluteMinute(min);
+ } else {
+ // hh:mm standard syntax.
+ if (!scanner->Peek().IsFixedLengthNumber(2) ||
+ !TimeComposer::IsHour(scanner->Peek().number())) {
+ return DateToken::Invalid();
+ }
+ tz->SetAbsoluteHour(scanner->Next().number());
+ if (!scanner->SkipSymbol(':')) return DateToken::Invalid();
+ if (!scanner->Peek().IsFixedLengthNumber(2) ||
+ !TimeComposer::IsMinute(scanner->Peek().number())) {
+ return DateToken::Invalid();
+ }
+ tz->SetAbsoluteMinute(scanner->Next().number());
+ }
+ }
+ if (!scanner->Peek().IsEndOfInput()) return DateToken::Invalid();
+ }
+ // Successfully parsed ES5 Date Time String. Default to UTC if no TZ given.
+ if (tz->IsEmpty()) tz->Set(0);
+ day->set_iso_date();
+ return DateToken::EndOfInput();
+}
+
+
} } // namespace v8::internal
#endif // V8_DATEPARSER_INL_H_
diff --git a/deps/v8/src/dateparser.cc b/deps/v8/src/dateparser.cc
index 6d8048876..4a0721fe8 100644
--- a/deps/v8/src/dateparser.cc
+++ b/deps/v8/src/dateparser.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -44,7 +44,7 @@ bool DateParser::DayComposer::Write(FixedArray* output) {
int day = kNone;
if (named_month_ == kNone) {
- if (index_ == 3 && !IsDay(comp_[0])) {
+ if (is_iso_date_ || (index_ == 3 && !IsDay(comp_[0]))) {
// YMD
year = comp_[0];
month = comp_[1];
@@ -71,8 +71,10 @@ bool DateParser::DayComposer::Write(FixedArray* output) {
}
}
- if (Between(year, 0, 49)) year += 2000;
- else if (Between(year, 50, 99)) year += 1900;
+ if (!is_iso_date_) {
+ if (Between(year, 0, 49)) year += 2000;
+ else if (Between(year, 50, 99)) year += 1900;
+ }
if (!Smi::IsValid(year) || !IsMonth(month) || !IsDay(day)) return false;
@@ -151,6 +153,7 @@ const int8_t DateParser::KeywordTable::
{'m', 's', 't', DateParser::TIME_ZONE_NAME, -7},
{'p', 'd', 't', DateParser::TIME_ZONE_NAME, -7},
{'p', 's', 't', DateParser::TIME_ZONE_NAME, -8},
+ {'t', '\0', '\0', DateParser::TIME_SEPARATOR, 0},
{'\0', '\0', '\0', DateParser::INVALID, 0},
};
@@ -175,4 +178,35 @@ int DateParser::KeywordTable::Lookup(const uint32_t* pre, int len) {
}
+int DateParser::ReadMilliseconds(DateToken token) {
+ // Read first three significant digits of the original numeral,
+ // as inferred from the value and the number of digits.
+ // I.e., use the number of digits to see if there were
+ // leading zeros.
+ int number = token.number();
+ int length = token.length();
+ if (length < 3) {
+ // Less than three digits. Multiply to put most significant digit
+ // in hundreds position.
+ if (length == 1) {
+ number *= 100;
+ } else if (length == 2) {
+ number *= 10;
+ }
+ } else if (length > 3) {
+ if (length > kMaxSignificantDigits) length = kMaxSignificantDigits;
+ // More than three digits. Divide by 10^(length - 3) to get three
+ // most significant digits.
+ int factor = 1;
+ do {
+ ASSERT(factor <= 100000000); // factor won't overflow.
+ factor *= 10;
+ length--;
+ } while (length > 3);
+ number /= factor;
+ }
+ return number;
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h
index 6e87c3418..4bd320e90 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/dateparser.h
@@ -61,9 +61,14 @@ class DateParser : public AllStatic {
static inline bool Between(int x, int lo, int hi) {
return static_cast<unsigned>(x - lo) <= static_cast<unsigned>(hi - lo);
}
+
// Indicates a missing value.
static const int kNone = kMaxInt;
+ // Maximal number of digits used to build the value of a numeral.
+ // Remaining digits are ignored.
+ static const int kMaxSignificantDigits = 9;
+
// InputReader provides basic string parsing and character classification.
template <typename Char>
class InputReader BASE_EMBEDDED {
@@ -71,32 +76,28 @@ class DateParser : public AllStatic {
InputReader(UnicodeCache* unicode_cache, Vector<Char> s)
: index_(0),
buffer_(s),
- has_read_number_(false),
unicode_cache_(unicode_cache) {
Next();
}
+ int position() { return index_; }
+
// Advance to the next character of the string.
- void Next() { ch_ = (index_ < buffer_.length()) ? buffer_[index_++] : 0; }
-
- // Read a string of digits as an unsigned number (cap just below kMaxInt).
- int ReadUnsignedNumber() {
- has_read_number_ = true;
- int n;
- for (n = 0; IsAsciiDigit() && n < kMaxInt / 10 - 1; Next()) {
- n = n * 10 + ch_ - '0';
- }
- return n;
+ void Next() {
+ ch_ = (index_ < buffer_.length()) ? buffer_[index_] : 0;
+ index_++;
}
- // Read a string of digits, take the first three or fewer as an unsigned
- // number of milliseconds, and ignore any digits after the first three.
- int ReadMilliseconds() {
- has_read_number_ = true;
+ // Read a string of digits as an unsigned number. Cap value at
+ // kMaxSignificantDigits, but skip remaining digits if the numeral
+ // is longer.
+ int ReadUnsignedNumeral() {
int n = 0;
- int power;
- for (power = 100; IsAsciiDigit(); Next(), power = power / 10) {
- n = n + power * (ch_ - '0');
+ int i = 0;
+ while (IsAsciiDigit()) {
+ if (i < kMaxSignificantDigits) n = n * 10 + ch_ - '0';
+ i++;
+ Next();
}
return n;
}
@@ -151,18 +152,138 @@ class DateParser : public AllStatic {
// Return 1 for '+' and -1 for '-'.
int GetAsciiSignValue() const { return 44 - static_cast<int>(ch_); }
- // Indicates whether any (possibly empty!) numbers have been read.
- bool HasReadNumber() const { return has_read_number_; }
-
private:
int index_;
Vector<Char> buffer_;
- bool has_read_number_;
uint32_t ch_;
UnicodeCache* unicode_cache_;
};
- enum KeywordType { INVALID, MONTH_NAME, TIME_ZONE_NAME, AM_PM };
+ enum KeywordType {
+ INVALID, MONTH_NAME, TIME_ZONE_NAME, TIME_SEPARATOR, AM_PM
+ };
+
+ struct DateToken {
+ public:
+ bool IsInvalid() { return tag_ == kInvalidTokenTag; }
+ bool IsUnknown() { return tag_ == kUnknownTokenTag; }
+ bool IsNumber() { return tag_ == kNumberTag; }
+ bool IsSymbol() { return tag_ == kSymbolTag; }
+ bool IsWhiteSpace() { return tag_ == kWhiteSpaceTag; }
+ bool IsEndOfInput() { return tag_ == kEndOfInputTag; }
+ bool IsKeyword() { return tag_ >= kKeywordTagStart; }
+
+ int length() { return length_; }
+
+ int number() {
+ ASSERT(IsNumber());
+ return value_;
+ }
+ KeywordType keyword_type() {
+ ASSERT(IsKeyword());
+ return static_cast<KeywordType>(tag_);
+ }
+ int keyword_value() {
+ ASSERT(IsKeyword());
+ return value_;
+ }
+ char symbol() {
+ ASSERT(IsSymbol());
+ return static_cast<char>(value_);
+ }
+ bool IsSymbol(char symbol) {
+ return IsSymbol() && this->symbol() == symbol;
+ }
+ bool IsKeywordType(KeywordType tag) {
+ return tag_ == tag;
+ }
+ bool IsFixedLengthNumber(int length) {
+ return IsNumber() && length_ == length;
+ }
+ bool IsAsciiSign() {
+ return tag_ == kSymbolTag && (value_ == '-' || value_ == '+');
+ }
+ int ascii_sign() {
+ ASSERT(IsAsciiSign());
+ return 44 - value_;
+ }
+ bool IsKeywordZ() {
+ return IsKeywordType(TIME_ZONE_NAME) && length_ == 1 && value_ == 0;
+ }
+ bool IsUnknown(int character) {
+ return IsUnknown() && value_ == character;
+ }
+ // Factory functions.
+ static DateToken Keyword(KeywordType tag, int value, int length) {
+ return DateToken(tag, length, value);
+ }
+ static DateToken Number(int value, int length) {
+ return DateToken(kNumberTag, length, value);
+ }
+ static DateToken Symbol(char symbol) {
+ return DateToken(kSymbolTag, 1, symbol);
+ }
+ static DateToken EndOfInput() {
+ return DateToken(kEndOfInputTag, 0, -1);
+ }
+ static DateToken WhiteSpace(int length) {
+ return DateToken(kWhiteSpaceTag, length, -1);
+ }
+ static DateToken Unknown() {
+ return DateToken(kUnknownTokenTag, 1, -1);
+ }
+ static DateToken Invalid() {
+ return DateToken(kInvalidTokenTag, 0, -1);
+ }
+ private:
+ enum TagType {
+ kInvalidTokenTag = -6,
+ kUnknownTokenTag = -5,
+ kWhiteSpaceTag = -4,
+ kNumberTag = -3,
+ kSymbolTag = -2,
+ kEndOfInputTag = -1,
+ kKeywordTagStart = 0
+ };
+ DateToken(int tag, int length, int value)
+ : tag_(tag),
+ length_(length),
+ value_(value) { }
+
+ int tag_;
+ int length_; // Number of characters.
+ int value_;
+ };
+
+ template <typename Char>
+ class DateStringTokenizer {
+ public:
+ explicit DateStringTokenizer(InputReader<Char>* in)
+ : in_(in), next_(Scan()) { }
+ DateToken Next() {
+ DateToken result = next_;
+ next_ = Scan();
+ return result;
+ }
+
+ DateToken Peek() {
+ return next_;
+ }
+ bool SkipSymbol(char symbol) {
+ if (next_.IsSymbol(symbol)) {
+ next_ = Scan();
+ return true;
+ }
+ return false;
+ }
+ private:
+ DateToken Scan();
+
+ InputReader<Char>* in_;
+ DateToken next_;
+ };
+
+ static int ReadMilliseconds(DateToken number);
// KeywordTable maps names of months, time zones, am/pm to numbers.
class KeywordTable : public AllStatic {
@@ -201,6 +322,7 @@ class DateParser : public AllStatic {
}
bool IsUTC() const { return hour_ == 0 && minute_ == 0; }
bool Write(FixedArray* output);
+ bool IsEmpty() { return hour_ == kNone; }
private:
int sign_;
int hour_;
@@ -228,10 +350,10 @@ class DateParser : public AllStatic {
bool Write(FixedArray* output);
static bool IsMinute(int x) { return Between(x, 0, 59); }
- private:
static bool IsHour(int x) { return Between(x, 0, 23); }
- static bool IsHour12(int x) { return Between(x, 0, 12); }
static bool IsSecond(int x) { return Between(x, 0, 59); }
+ private:
+ static bool IsHour12(int x) { return Between(x, 0, 12); }
static bool IsMillisecond(int x) { return Between(x, 0, 999); }
static const int kSize = 4;
@@ -242,22 +364,42 @@ class DateParser : public AllStatic {
class DayComposer BASE_EMBEDDED {
public:
- DayComposer() : index_(0), named_month_(kNone) {}
+ DayComposer() : index_(0), named_month_(kNone), is_iso_date_(false) {}
bool IsEmpty() const { return index_ == 0; }
bool Add(int n) {
- return index_ < kSize ? (comp_[index_++] = n, true) : false;
+ if (index_ < kSize) {
+ comp_[index_] = n;
+ index_++;
+ return true;
+ }
+ return false;
}
void SetNamedMonth(int n) { named_month_ = n; }
bool Write(FixedArray* output);
- private:
+ void set_iso_date() { is_iso_date_ = true; }
static bool IsMonth(int x) { return Between(x, 1, 12); }
static bool IsDay(int x) { return Between(x, 1, 31); }
+ private:
static const int kSize = 3;
int comp_[kSize];
int index_;
int named_month_;
+ // If set, ensures that data is always parsed in year-month-date order.
+ bool is_iso_date_;
};
+
+ // Tries to parse an ES5 Date Time String. Returns the next token
+ // to continue with in the legacy date string parser. If parsing is
+ // complete, returns DateToken::EndOfInput(). If terminally unsuccessful,
+ // returns DateToken::Invalid(). Otherwise parsing continues in the
+ // legacy parser.
+ template <typename Char>
+ static DateParser::DateToken ParseES5DateTime(
+ DateStringTokenizer<Char>* scanner,
+ DayComposer* day,
+ TimeComposer* time,
+ TimeZoneComposer* tz);
};
diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js
index c632e4771..36b624e57 100644
--- a/deps/v8/src/debug-debugger.js
+++ b/deps/v8/src/debug-debugger.js
@@ -2311,21 +2311,10 @@ DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
- var modules = parseInt(request.arguments.modules);
- if (isNaN(modules)) {
- return response.failed('Modules is not an integer');
- }
- var tag = parseInt(request.arguments.tag);
- if (isNaN(tag)) {
- tag = 0;
- }
if (request.arguments.command == 'resume') {
- %ProfilerResume(modules, tag);
+ %ProfilerResume();
} else if (request.arguments.command == 'pause') {
- %ProfilerPause(modules, tag);
+ %ProfilerPause();
} else {
return response.failed('Unknown command');
}
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index e2e8a65e2..e8c659718 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -44,6 +44,9 @@ DeoptimizerData::DeoptimizerData() {
lazy_deoptimization_entry_code_ = NULL;
current_ = NULL;
deoptimizing_code_list_ = NULL;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ deoptimized_frame_info_ = NULL;
+#endif
}
@@ -58,6 +61,16 @@ DeoptimizerData::~DeoptimizerData() {
}
}
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void DeoptimizerData::Iterate(ObjectVisitor* v) {
+ if (deoptimized_frame_info_ != NULL) {
+ deoptimized_frame_info_->Iterate(v);
+ }
+}
+#endif
+
+
Deoptimizer* Deoptimizer::New(JSFunction* function,
BailoutType type,
unsigned bailout_id,
@@ -70,7 +83,8 @@ Deoptimizer* Deoptimizer::New(JSFunction* function,
type,
bailout_id,
from,
- fp_to_sp_delta);
+ fp_to_sp_delta,
+ NULL);
ASSERT(isolate->deoptimizer_data()->current_ == NULL);
isolate->deoptimizer_data()->current_ = deoptimizer;
return deoptimizer;
@@ -86,6 +100,92 @@ Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
return result;
}
+#ifdef ENABLE_DEBUGGER_SUPPORT
+DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
+ JavaScriptFrame* frame,
+ int frame_index,
+ Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ ASSERT(frame->is_optimized());
+ ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
+
+ // Get the function and code from the frame.
+ JSFunction* function = JSFunction::cast(frame->function());
+ Code* code = frame->LookupCode();
+ Address code_start_address = code->instruction_start();
+
+ // Locate the deoptimization point in the code. As we are at a call the
+ // return address must be at a place in the code with deoptimization support.
+ int deoptimization_index = Safepoint::kNoDeoptimizationIndex;
+ // Scope this as the safe point constructor will disallow allocation.
+ {
+ SafepointTable table(code);
+ for (unsigned i = 0; i < table.length(); ++i) {
+ Address address = code_start_address + table.GetPcOffset(i);
+ if (address == frame->pc()) {
+ SafepointEntry safepoint_entry = table.GetEntry(i);
+ ASSERT(safepoint_entry.deoptimization_index() !=
+ Safepoint::kNoDeoptimizationIndex);
+ deoptimization_index = safepoint_entry.deoptimization_index();
+ break;
+ }
+ }
+ }
+ ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex);
+
+ // Always use the actual stack slots when calculating the fp to sp
+ // delta adding two for the function and context.
+ unsigned stack_slots = code->stack_slots();
+ unsigned fp_to_sp_delta = ((stack_slots + 2) * kPointerSize);
+
+ Deoptimizer* deoptimizer = new Deoptimizer(isolate,
+ function,
+ Deoptimizer::DEBUGGER,
+ deoptimization_index,
+ frame->pc(),
+ fp_to_sp_delta,
+ code);
+ Address tos = frame->fp() - fp_to_sp_delta;
+ deoptimizer->FillInputFrame(tos, frame);
+
+ // Calculate the output frames.
+ Deoptimizer::ComputeOutputFrames(deoptimizer);
+
+ // Create the GC safe output frame information and register it for GC
+ // handling.
+ ASSERT_LT(frame_index, deoptimizer->output_count());
+ DeoptimizedFrameInfo* info =
+ new DeoptimizedFrameInfo(deoptimizer, frame_index);
+ isolate->deoptimizer_data()->deoptimized_frame_info_ = info;
+
+ // Get the "simulated" top and size for the requested frame.
+ Address top =
+ reinterpret_cast<Address>(deoptimizer->output_[frame_index]->GetTop());
+ unsigned size =
+ deoptimizer->output_[frame_index]->GetFrameSize() / kPointerSize;
+
+ // Done with the GC-unsafe frame descriptions. This re-enables allocation.
+ deoptimizer->DeleteFrameDescriptions();
+
+ // Allocate a heap number for the doubles belonging to this frame.
+ deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame(
+ top, size, info);
+
+ // Finished using the deoptimizer instance.
+ delete deoptimizer;
+
+ return info;
+}
+
+
+void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
+ Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == info);
+ delete info;
+ isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL;
+}
+#endif
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
int count,
@@ -209,18 +309,24 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
BailoutType type,
unsigned bailout_id,
Address from,
- int fp_to_sp_delta)
+ int fp_to_sp_delta,
+ Code* optimized_code)
: isolate_(isolate),
function_(function),
bailout_id_(bailout_id),
bailout_type_(type),
from_(from),
fp_to_sp_delta_(fp_to_sp_delta),
+ input_(NULL),
output_count_(0),
output_(NULL),
deferred_heap_numbers_(0) {
if (FLAG_trace_deopt && type != OSR) {
- PrintF("**** DEOPT: ");
+ if (type == DEBUGGER) {
+ PrintF("**** DEOPT FOR DEBUGGER: ");
+ } else {
+ PrintF("**** DEOPT: ");
+ }
function->PrintName();
PrintF(" at bailout #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
bailout_id,
@@ -248,10 +354,16 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
optimized_code_ = function_->code();
ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
ASSERT(!optimized_code_->contains(from));
+ } else if (type == DEBUGGER) {
+ optimized_code_ = optimized_code;
+ ASSERT(optimized_code_->contains(from));
}
ASSERT(HEAP->allow_allocation(false));
unsigned size = ComputeInputFrameSize();
input_ = new(size) FrameDescription(size, function);
+#ifdef DEBUG
+ input_->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
}
@@ -417,6 +529,7 @@ void Deoptimizer::DoComputeOutputFrames() {
void Deoptimizer::MaterializeHeapNumbers() {
+ ASSERT_NE(DEBUGGER, bailout_type_);
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
@@ -432,6 +545,35 @@ void Deoptimizer::MaterializeHeapNumbers() {
}
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
+ Address top, intptr_t size, DeoptimizedFrameInfo* info) {
+ ASSERT_EQ(DEBUGGER, bailout_type_);
+ for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
+ HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
+
+    // Check if the heap number to materialize actually belongs to the frame
+ // being extracted.
+ Address slot = d.slot_address();
+ if (top <= slot && slot < top + size) {
+ Handle<Object> num = isolate_->factory()->NewNumber(d.value());
+ int expression_index = static_cast<int>(
+ info->expression_count_ - (slot - top) / kPointerSize - 1);
+ if (FLAG_trace_deopt) {
+ PrintF("Materializing a new heap number %p [%e] in slot %p"
+ "for expression stack index %d\n",
+ reinterpret_cast<void*>(*num),
+ d.value(),
+ d.slot_address(),
+ expression_index);
+ }
+ info->SetExpression(expression_index, *num);
+ }
+ }
+}
+#endif
+
+
void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset) {
@@ -972,18 +1114,32 @@ unsigned FrameDescription::GetOffsetFromSlotIndex(Deoptimizer* deoptimizer,
if (slot_index >= 0) {
// Local or spill slots. Skip the fixed part of the frame
// including all arguments.
- unsigned base = static_cast<unsigned>(
- GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction()));
+ unsigned base =
+ GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
return base - ((slot_index + 1) * kPointerSize);
} else {
// Incoming parameter.
- unsigned base = static_cast<unsigned>(GetFrameSize() -
- deoptimizer->ComputeIncomingArgumentSize(GetFunction()));
+ unsigned base = GetFrameSize() -
+ deoptimizer->ComputeIncomingArgumentSize(GetFunction());
return base - ((slot_index + 1) * kPointerSize);
}
}
+unsigned FrameDescription::GetExpressionCount(Deoptimizer* deoptimizer) {
+ ASSERT_EQ(Code::FUNCTION, kind_);
+ unsigned size = GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
+ return size / kPointerSize;
+}
+
+
+Object* FrameDescription::GetExpression(Deoptimizer* deoptimizer, int index) {
+ ASSERT_EQ(Code::FUNCTION, kind_);
+ unsigned offset = GetOffsetFromSlotIndex(deoptimizer, index);
+ return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
+}
+
+
void TranslationBuffer::Add(int32_t value) {
// Encode the sign bit in the least significant bit.
bool is_negative = (value < 0);
@@ -1256,4 +1412,24 @@ void SlotRef::ComputeSlotMappingForArguments(JavaScriptFrame* frame,
}
+DeoptimizedFrameInfo::DeoptimizedFrameInfo(
+ Deoptimizer* deoptimizer, int frame_index) {
+ FrameDescription* output_frame = deoptimizer->output_[frame_index];
+ expression_count_ = output_frame->GetExpressionCount(deoptimizer);
+ expression_stack_ = new Object*[expression_count_];
+ for (int i = 0; i < expression_count_; i++) {
+ SetExpression(i, output_frame->GetExpression(deoptimizer, i));
+ }
+}
+
+
+DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
+ delete expression_stack_;
+}
+
+void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
+ v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 91030e2d8..64823183a 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -41,7 +41,7 @@ namespace internal {
class FrameDescription;
class TranslationIterator;
class DeoptimizingCodeListNode;
-
+class DeoptimizedFrameInfo;
class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
public:
@@ -81,11 +81,19 @@ class DeoptimizerData {
DeoptimizerData();
~DeoptimizerData();
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ void Iterate(ObjectVisitor* v);
+#endif
+
private:
LargeObjectChunk* eager_deoptimization_entry_code_;
LargeObjectChunk* lazy_deoptimization_entry_code_;
Deoptimizer* current_;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ DeoptimizedFrameInfo* deoptimized_frame_info_;
+#endif
+
// List of deoptimized code which still have references from active stack
// frames. These code objects are needed by the deoptimizer when deoptimizing
// a frame for which the code object for the function function has been
@@ -103,7 +111,10 @@ class Deoptimizer : public Malloced {
enum BailoutType {
EAGER,
LAZY,
- OSR
+ OSR,
+ // This last bailout type is not really a bailout, but used by the
+ // debugger to deoptimize stack frames to allow inspection.
+ DEBUGGER
};
int output_count() const { return output_count_; }
@@ -116,6 +127,16 @@ class Deoptimizer : public Malloced {
Isolate* isolate);
static Deoptimizer* Grab(Isolate* isolate);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // The returned object with information on the optimized frame needs to be
+ // freed before another one can be generated.
+ static DeoptimizedFrameInfo* DebuggerInspectableFrame(JavaScriptFrame* frame,
+ int frame_index,
+ Isolate* isolate);
+ static void DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
+ Isolate* isolate);
+#endif
+
// Makes sure that there is enough room in the relocation
// information of a code object to perform lazy deoptimization
// patching. If there is not enough room a new relocation
@@ -171,6 +192,10 @@ class Deoptimizer : public Malloced {
~Deoptimizer();
void MaterializeHeapNumbers();
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ void MaterializeHeapNumbersForDebuggerInspectableFrame(
+ Address top, intptr_t size, DeoptimizedFrameInfo* info);
+#endif
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
@@ -233,7 +258,8 @@ class Deoptimizer : public Malloced {
BailoutType type,
unsigned bailout_id,
Address from,
- int fp_to_sp_delta);
+ int fp_to_sp_delta,
+ Code* optimized_code);
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
@@ -269,6 +295,11 @@ class Deoptimizer : public Malloced {
static Code* FindDeoptimizingCodeFromAddress(Address addr);
static void RemoveDeoptimizingCode(Code* code);
+ // Fill the input from from a JavaScript frame. This is used when
+ // the debugger needs to inspect an optimized frame. For normal
+ // deoptimizations the input frame is filled in generated code.
+ void FillInputFrame(Address tos, JavaScriptFrame* frame);
+
Isolate* isolate_;
JSFunction* function_;
Code* optimized_code_;
@@ -290,6 +321,7 @@ class Deoptimizer : public Malloced {
friend class FrameDescription;
friend class DeoptimizingCodeListNode;
+ friend class DeoptimizedFrameInfo;
};
@@ -308,7 +340,10 @@ class FrameDescription {
free(description);
}
- intptr_t GetFrameSize() const { return frame_size_; }
+ uint32_t GetFrameSize() const {
+ ASSERT(static_cast<uint32_t>(frame_size_) == frame_size_);
+ return static_cast<uint32_t>(frame_size_);
+ }
JSFunction* GetFunction() const { return function_; }
@@ -360,6 +395,17 @@ class FrameDescription {
void SetContinuation(intptr_t pc) { continuation_ = pc; }
+#ifdef DEBUG
+ Code::Kind GetKind() const { return kind_; }
+ void SetKind(Code::Kind kind) { kind_ = kind; }
+#endif
+
+ // Get the expression stack height for a unoptimized frame.
+ unsigned GetExpressionCount(Deoptimizer* deoptimizer);
+
+ // Get the expression stack value for an unoptimized frame.
+ Object* GetExpression(Deoptimizer* deoptimizer, int index);
+
static int registers_offset() {
return OFFSET_OF(FrameDescription, registers_);
}
@@ -391,6 +437,9 @@ class FrameDescription {
private:
static const uint32_t kZapUint32 = 0xbeeddead;
+ // Frame_size_ must hold a uint32_t value. It is only a uintptr_t to
+ // keep the variable-size array frame_content_ of type intptr_t at
+ // the end of the structure aligned.
uintptr_t frame_size_; // Number of bytes.
JSFunction* function_;
intptr_t registers_[Register::kNumRegisters];
@@ -399,6 +448,9 @@ class FrameDescription {
intptr_t pc_;
intptr_t fp_;
Smi* state_;
+#ifdef DEBUG
+ Code::Kind kind_;
+#endif
// Continuation is the PC where the execution continues after
// deoptimizing.
@@ -597,6 +649,42 @@ class SlotRef BASE_EMBEDDED {
};
+#ifdef ENABLE_DEBUGGER_SUPPORT
+// Class used to represent an unoptimized frame when the debugger
+// needs to inspect a frame that is part of an optimized frame. The
+// internally used FrameDescription objects are not GC safe so for use
+// by the debugger frame information is copied to an object of this type.
+class DeoptimizedFrameInfo : public Malloced {
+ public:
+ DeoptimizedFrameInfo(Deoptimizer* deoptimizer, int frame_index);
+ virtual ~DeoptimizedFrameInfo();
+
+ // GC support.
+ void Iterate(ObjectVisitor* v);
+
+ // Return the height of the expression stack.
+ int expression_count() { return expression_count_; }
+
+ // Get an expression from the expression stack.
+ Object* GetExpression(int index) {
+ ASSERT(0 <= index && index < expression_count());
+ return expression_stack_[index];
+ }
+
+ private:
+ // Set an expression on the expression stack.
+ void SetExpression(int index, Object* obj) {
+ ASSERT(0 <= index && index < expression_count());
+ expression_stack_[index] = obj;
+ }
+
+ int expression_count_;
+ Object** expression_stack_;
+
+ friend class Deoptimizer;
+};
+#endif
+
} } // namespace v8::internal
#endif // V8_DEOPTIMIZER_H_
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index e24271939..6e13dd2d0 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -203,7 +203,6 @@ DEFINE_bool(deopt, true, "support deoptimization")
DEFINE_bool(trace_deopt, false, "trace deoptimization")
// compiler.cc
-DEFINE_bool(strict, false, "strict error checking")
DEFINE_int(min_preparse_length, 1024,
"minimum length for automatic enable preparsing")
DEFINE_bool(always_full_compiler, false,
@@ -372,6 +371,8 @@ DEFINE_bool(debug_script_collected_events, true,
DEFINE_bool(gdbjit, false, "enable GDBJIT interface (disables compacting GC)")
DEFINE_bool(gdbjit_full, false, "enable GDBJIT interface for all code objects")
DEFINE_bool(gdbjit_dump, false, "dump elf objects with debug info to disk")
+DEFINE_string(gdbjit_dump_filter, "",
+ "dump only objects containing this substring")
//
// Debug only flags
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index d81d5afaa..4e67463f1 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -528,6 +528,17 @@ Address StandardFrame::GetExpressionAddress(int n) const {
}
+Object* StandardFrame::GetExpression(Address fp, int index) {
+ return Memory::Object_at(GetExpressionAddress(fp, index));
+}
+
+
+Address StandardFrame::GetExpressionAddress(Address fp, int n) {
+ const int offset = StandardFrameConstants::kExpressionsOffset;
+ return fp + offset - n * kPointerSize;
+}
+
+
int StandardFrame::ComputeExpressionsCount() const {
const int offset =
StandardFrameConstants::kExpressionsOffset + kPointerSize;
@@ -646,6 +657,16 @@ bool JavaScriptFrame::IsConstructor() const {
}
+int JavaScriptFrame::GetArgumentsLength() const {
+ // If there is an arguments adaptor frame get the arguments length from it.
+ if (has_adapted_arguments()) {
+ return Smi::cast(GetExpression(caller_fp(), 0))->value();
+ } else {
+ return GetNumberOfIncomingArguments();
+ }
+}
+
+
Code* JavaScriptFrame::unchecked_code() const {
JSFunction* function = JSFunction::cast(this->function());
return function->unchecked_code();
@@ -812,6 +833,22 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
}
+int OptimizedFrame::GetInlineCount() {
+ ASSERT(is_optimized());
+
+ int deopt_index = Safepoint::kNoDeoptimizationIndex;
+ DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
+
+ TranslationIterator it(data->TranslationByteArray(),
+ data->TranslationIndex(deopt_index)->value());
+ Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+ ASSERT(opcode == Translation::BEGIN);
+ USE(opcode);
+ int frame_count = it.Next();
+ return frame_count;
+}
+
+
void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
ASSERT(functions->length() == 0);
ASSERT(is_optimized());
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index aa91026fb..9e93daef3 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -383,6 +383,7 @@ class StandardFrame: public StackFrame {
inline Object* GetExpression(int index) const;
inline void SetExpression(int index, Object* value);
int ComputeExpressionsCount() const;
+ static Object* GetExpression(Address fp, int index);
virtual void SetCallerFp(Address caller_fp);
@@ -411,6 +412,7 @@ class StandardFrame: public StackFrame {
// Returns the address of the n'th expression stack element.
Address GetExpressionAddress(int n) const;
+ static Address GetExpressionAddress(Address fp, int n);
// Determines if the n'th expression stack element is in a stack
// handler or not. Requires traversing all handlers in this frame.
@@ -483,6 +485,7 @@ class JavaScriptFrame: public StandardFrame {
// actual passed arguments are available in an arguments adaptor
// frame below it on the stack.
inline bool has_adapted_arguments() const;
+ int GetArgumentsLength() const;
// Garbage collection support.
virtual void Iterate(ObjectVisitor* v) const;
@@ -495,6 +498,9 @@ class JavaScriptFrame: public StandardFrame {
// Determine the code for the frame.
virtual Code* unchecked_code() const;
+ // Returns the levels of inlining for this frame.
+ virtual int GetInlineCount() { return 1; }
+
// Return a list with JSFunctions of this frame.
virtual void GetFunctions(List<JSFunction*>* functions);
@@ -533,6 +539,8 @@ class OptimizedFrame : public JavaScriptFrame {
// GC support.
virtual void Iterate(ObjectVisitor* v) const;
+ virtual int GetInlineCount();
+
// Return a list with JSFunctions of this frame.
// The functions are ordered bottom-to-top (i.e. functions.last()
// is the top-most activation)
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index 03abfbd85..70db6114a 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -401,7 +401,7 @@ int FullCodeGenerator::SlotOffset(Slot* slot) {
// Adjust by a (parameter or local) base offset.
switch (slot->type()) {
case Slot::PARAMETER:
- offset += (scope()->num_parameters() + 1) * kPointerSize;
+ offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
break;
case Slot::LOCAL:
offset += JavaScriptFrameConstants::kLocal0Offset;
@@ -1106,7 +1106,7 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
// Extend the context before executing the catch block.
{ Comment cmnt(masm_, "[ Extend catch context");
- __ Push(stmt->name());
+ __ Push(stmt->variable()->name());
__ push(result_register());
PushFunctionArgumentForContextAllocation();
__ CallRuntime(Runtime::kPushCatchContext, 3);
@@ -1114,7 +1114,11 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
context_register());
}
+ Scope* saved_scope = scope();
+ scope_ = stmt->scope();
+ ASSERT(scope_->declarations()->is_empty());
Visit(stmt->catch_block());
+ scope_ = saved_scope;
__ jmp(&done);
// Try block code. Sets up the exception handler chain.
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 7a421841b..d25ca490f 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -80,6 +80,7 @@ class FullCodeGenerator: public AstVisitor {
explicit FullCodeGenerator(MacroAssembler* masm)
: masm_(masm),
info_(NULL),
+ scope_(NULL),
nesting_stack_(NULL),
loop_depth_(0),
context_(NULL),
@@ -531,23 +532,11 @@ class FullCodeGenerator: public AstVisitor {
return is_strict_mode() ? kStrictMode : kNonStrictMode;
}
FunctionLiteral* function() { return info_->function(); }
- Scope* scope() { return info_->scope(); }
+ Scope* scope() { return scope_; }
static Register result_register();
static Register context_register();
- // Helper for calling an IC stub.
- void EmitCallIC(Handle<Code> ic,
- RelocInfo::Mode mode,
- unsigned ast_id);
-
- // Calling an IC stub with a patch site. Passing NULL for patch_site
- // or non NULL patch_site which is not activated indicates no inlined smi code
- // and emits a nop after the IC call.
- void EmitCallIC(Handle<Code> ic,
- JumpPatchSite* patch_site,
- unsigned ast_id);
-
// Set fields in the stack frame. Offsets are the frame pointer relative
// offsets defined in, e.g., StandardFrameConstants.
void StoreToFrameField(int frame_offset, Register value);
@@ -758,6 +747,7 @@ class FullCodeGenerator: public AstVisitor {
MacroAssembler* masm_;
CompilationInfo* info_;
+ Scope* scope_;
Label return_label_;
NestedStatement* nesting_stack_;
int loop_depth_;
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index b4992a7f5..4d57e2546 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -34,16 +34,29 @@
#include "global-handles.h"
#include "messages.h"
#include "natives.h"
+#include "scopeinfo.h"
namespace v8 {
namespace internal {
+#ifdef __APPLE__
+#define __MACH_O
+class MachO;
+class MachOSection;
+typedef MachO DebugObject;
+typedef MachOSection DebugSection;
+#else
+#define __ELF
class ELF;
+class ELFSection;
+typedef ELF DebugObject;
+typedef ELFSection DebugSection;
+#endif
class Writer BASE_EMBEDDED {
public:
- explicit Writer(ELF* elf)
- : elf_(elf),
+ explicit Writer(DebugObject* debug_object)
+ : debug_object_(debug_object),
position_(0),
capacity_(1024),
buffer_(reinterpret_cast<byte*>(malloc(capacity_))) {
@@ -112,7 +125,7 @@ class Writer BASE_EMBEDDED {
}
}
- ELF* elf() { return elf_; }
+ DebugObject* debug_object() { return debug_object_; }
byte* buffer() { return buffer_; }
@@ -165,7 +178,7 @@ class Writer BASE_EMBEDDED {
return reinterpret_cast<T*>(&buffer_[offset]);
}
- ELF* elf_;
+ DebugObject* debug_object_;
uintptr_t position_;
uintptr_t capacity_;
byte* buffer_;
@@ -173,21 +186,120 @@ class Writer BASE_EMBEDDED {
class StringTable;
-class ELFSection : public ZoneObject {
+template<typename THeader>
+class DebugSectionBase : public ZoneObject {
public:
- struct Header {
- uint32_t name;
- uint32_t type;
- uintptr_t flags;
- uintptr_t address;
- uintptr_t offset;
- uintptr_t size;
- uint32_t link;
- uint32_t info;
- uintptr_t alignment;
- uintptr_t entry_size;
+ virtual ~DebugSectionBase() { }
+
+ virtual void WriteBody(Writer::Slot<THeader> header, Writer* writer) {
+ uintptr_t start = writer->position();
+ if (WriteBody(writer)) {
+ uintptr_t end = writer->position();
+ header->offset = start;
+#if defined(__MACH_O)
+ header->addr = 0;
+#endif
+ header->size = end - start;
+ }
+ }
+
+ virtual bool WriteBody(Writer* writer) {
+ return false;
+ }
+
+ typedef THeader Header;
+};
+
+
+struct MachOSectionHeader {
+ char sectname[16];
+ char segname[16];
+#if defined(V8_TARGET_ARCH_IA32)
+ uint32_t addr;
+ uint32_t size;
+#else
+ uint64_t addr;
+ uint64_t size;
+#endif
+ uint32_t offset;
+ uint32_t align;
+ uint32_t reloff;
+ uint32_t nreloc;
+ uint32_t flags;
+ uint32_t reserved1;
+ uint32_t reserved2;
+};
+
+
+class MachOSection : public DebugSectionBase<MachOSectionHeader> {
+ public:
+ enum Type {
+ S_REGULAR = 0x0u,
+ S_ATTR_COALESCED = 0xbu,
+ S_ATTR_SOME_INSTRUCTIONS = 0x400u,
+ S_ATTR_DEBUG = 0x02000000u,
+ S_ATTR_PURE_INSTRUCTIONS = 0x80000000u
};
+ MachOSection(const char* name,
+ const char* segment,
+ uintptr_t align,
+ uint32_t flags)
+ : name_(name),
+ segment_(segment),
+ align_(align),
+ flags_(flags) {
+ ASSERT(IsPowerOf2(align));
+ if (align_ != 0) {
+ align_ = WhichPowerOf2(align_);
+ }
+ }
+
+ virtual ~MachOSection() { }
+
+ virtual void PopulateHeader(Writer::Slot<Header> header) {
+ header->addr = 0;
+ header->size = 0;
+ header->offset = 0;
+ header->align = align_;
+ header->reloff = 0;
+ header->nreloc = 0;
+ header->flags = flags_;
+ header->reserved1 = 0;
+ header->reserved2 = 0;
+ memset(header->sectname, 0, sizeof(header->sectname));
+ memset(header->segname, 0, sizeof(header->segname));
+ ASSERT(strlen(name_) < sizeof(header->sectname));
+ ASSERT(strlen(segment_) < sizeof(header->segname));
+ strncpy(header->sectname, name_, sizeof(header->sectname));
+ strncpy(header->segname, segment_, sizeof(header->segname));
+ }
+
+ private:
+ const char* name_;
+ const char* segment_;
+ uintptr_t align_;
+ uint32_t flags_;
+};
+
+
+struct ELFSectionHeader {
+ uint32_t name;
+ uint32_t type;
+ uintptr_t flags;
+ uintptr_t address;
+ uintptr_t offset;
+ uintptr_t size;
+ uint32_t link;
+ uint32_t info;
+ uintptr_t alignment;
+ uintptr_t entry_size;
+};
+
+
+#if defined(__ELF)
+class ELFSection : public DebugSectionBase<ELFSectionHeader> {
+ public:
enum Type {
TYPE_NULL = 0,
TYPE_PROGBITS = 1,
@@ -252,15 +364,45 @@ class ELFSection : public ZoneObject {
header->entry_size = 0;
}
-
private:
const char* name_;
Type type_;
uintptr_t align_;
uint16_t index_;
};
+#endif // defined(__ELF)
+
+
+#if defined(__MACH_O)
+class MachOTextSection : public MachOSection {
+ public:
+ MachOTextSection(uintptr_t align,
+ uintptr_t addr,
+ uintptr_t size)
+ : MachOSection("__text",
+ "__TEXT",
+ align,
+ MachOSection::S_REGULAR |
+ MachOSection::S_ATTR_SOME_INSTRUCTIONS |
+ MachOSection::S_ATTR_PURE_INSTRUCTIONS),
+ addr_(addr),
+ size_(size) { }
+ protected:
+ virtual void PopulateHeader(Writer::Slot<Header> header) {
+ MachOSection::PopulateHeader(header);
+ header->addr = addr_;
+ header->size = size_;
+ }
+ private:
+ uintptr_t addr_;
+ uintptr_t size_;
+};
+#endif // defined(__MACH_O)
+
+
+#if defined(__ELF)
class FullHeaderELFSection : public ELFSection {
public:
FullHeaderELFSection(const char* name,
@@ -349,8 +491,139 @@ void ELFSection::PopulateHeader(Writer::Slot<ELFSection::Header> header,
header->alignment = align_;
PopulateHeader(header);
}
+#endif // defined(__ELF)
+
+
+#if defined(__MACH_O)
+class MachO BASE_EMBEDDED {
+ public:
+ MachO() : sections_(6) { }
+
+ uint32_t AddSection(MachOSection* section) {
+ sections_.Add(section);
+ return sections_.length() - 1;
+ }
+
+ void Write(Writer* w, uintptr_t code_start, uintptr_t code_size) {
+ Writer::Slot<MachOHeader> header = WriteHeader(w);
+ uintptr_t load_command_start = w->position();
+ Writer::Slot<MachOSegmentCommand> cmd = WriteSegmentCommand(w,
+ code_start,
+ code_size);
+ WriteSections(w, cmd, header, load_command_start);
+ }
+
+ private:
+ struct MachOHeader {
+ uint32_t magic;
+ uint32_t cputype;
+ uint32_t cpusubtype;
+ uint32_t filetype;
+ uint32_t ncmds;
+ uint32_t sizeofcmds;
+ uint32_t flags;
+#if defined(V8_TARGET_ARCH_X64)
+ uint32_t reserved;
+#endif
+ };
+
+ struct MachOSegmentCommand {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ char segname[16];
+#if defined(V8_TARGET_ARCH_IA32)
+ uint32_t vmaddr;
+ uint32_t vmsize;
+ uint32_t fileoff;
+ uint32_t filesize;
+#else
+ uint64_t vmaddr;
+ uint64_t vmsize;
+ uint64_t fileoff;
+ uint64_t filesize;
+#endif
+ uint32_t maxprot;
+ uint32_t initprot;
+ uint32_t nsects;
+ uint32_t flags;
+ };
+
+ enum MachOLoadCommandCmd {
+ LC_SEGMENT_32 = 0x00000001u,
+ LC_SEGMENT_64 = 0x00000019u
+ };
+
+
+ Writer::Slot<MachOHeader> WriteHeader(Writer* w) {
+ ASSERT(w->position() == 0);
+ Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>();
+#if defined(V8_TARGET_ARCH_IA32)
+ header->magic = 0xFEEDFACEu;
+ header->cputype = 7; // i386
+ header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
+#elif defined(V8_TARGET_ARCH_X64)
+ header->magic = 0xFEEDFACFu;
+ header->cputype = 7 | 0x01000000; // i386 | 64-bit ABI
+ header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
+ header->reserved = 0;
+#else
+#error Unsupported target architecture.
+#endif
+ header->filetype = 0x1; // MH_OBJECT
+ header->ncmds = 1;
+ header->sizeofcmds = 0;
+ header->flags = 0;
+ return header;
+ }
+
+
+ Writer::Slot<MachOSegmentCommand> WriteSegmentCommand(Writer* w,
+ uintptr_t code_start,
+ uintptr_t code_size) {
+ Writer::Slot<MachOSegmentCommand> cmd =
+ w->CreateSlotHere<MachOSegmentCommand>();
+#if defined(V8_TARGET_ARCH_IA32)
+ cmd->cmd = LC_SEGMENT_32;
+#else
+ cmd->cmd = LC_SEGMENT_64;
+#endif
+ cmd->vmaddr = code_start;
+ cmd->vmsize = code_size;
+ cmd->fileoff = 0;
+ cmd->filesize = 0;
+ cmd->maxprot = 7;
+ cmd->initprot = 7;
+ cmd->flags = 0;
+ cmd->nsects = sections_.length();
+ memset(cmd->segname, 0, 16);
+ cmd->cmdsize = sizeof(MachOSegmentCommand) + sizeof(MachOSection::Header) *
+ cmd->nsects;
+ return cmd;
+ }
+
+
+ void WriteSections(Writer* w,
+ Writer::Slot<MachOSegmentCommand> cmd,
+ Writer::Slot<MachOHeader> header,
+ uintptr_t load_command_start) {
+ Writer::Slot<MachOSection::Header> headers =
+ w->CreateSlotsHere<MachOSection::Header>(sections_.length());
+ cmd->fileoff = w->position();
+ header->sizeofcmds = w->position() - load_command_start;
+ for (int section = 0; section < sections_.length(); ++section) {
+ sections_[section]->PopulateHeader(headers.at(section));
+ sections_[section]->WriteBody(headers.at(section), w);
+ }
+ cmd->filesize = w->position() - (uintptr_t)cmd->fileoff;
+ }
+
+
+ ZoneList<MachOSection*> sections_;
+};
+#endif // defined(__MACH_O)
+#if defined(__ELF)
class ELF BASE_EMBEDDED {
public:
ELF() : sections_(6) {
@@ -596,7 +869,7 @@ class ELFSymbolTable : public ELFSection {
// String table for this symbol table should follow it in the section table.
StringTable* strtab =
- static_cast<StringTable*>(w->elf()->SectionAt(index() + 1));
+ static_cast<StringTable*>(w->debug_object()->SectionAt(index() + 1));
strtab->AttachWriter(w);
symbols.at(0).set(ELFSymbol::SerializedLayout(0,
0,
@@ -640,6 +913,7 @@ class ELFSymbolTable : public ELFSection {
ZoneList<ELFSymbol> locals_;
ZoneList<ELFSymbol> globals_;
};
+#endif // defined(__ELF)
class CodeDescription BASE_EMBEDDED {
@@ -657,12 +931,14 @@ class CodeDescription BASE_EMBEDDED {
Code* code,
Handle<Script> script,
GDBJITLineInfo* lineinfo,
- GDBJITInterface::CodeTag tag)
+ GDBJITInterface::CodeTag tag,
+ CompilationInfo* info)
: name_(name),
code_(code),
script_(script),
lineinfo_(lineinfo),
- tag_(tag) {
+ tag_(tag),
+ info_(info) {
}
const char* name() const {
@@ -677,6 +953,14 @@ class CodeDescription BASE_EMBEDDED {
return tag_;
}
+ CompilationInfo* info() const {
+ return info_;
+ }
+
+ bool IsInfoAvailable() const {
+ return info_ != NULL;
+ }
+
uintptr_t CodeStart() const {
return reinterpret_cast<uintptr_t>(code_->instruction_start());
}
@@ -724,12 +1008,13 @@ class CodeDescription BASE_EMBEDDED {
Handle<Script> script_;
GDBJITLineInfo* lineinfo_;
GDBJITInterface::CodeTag tag_;
+ CompilationInfo* info_;
#ifdef V8_TARGET_ARCH_X64
uintptr_t stack_state_start_addresses_[STACK_STATE_MAX];
#endif
};
-
+#if defined(__ELF)
static void CreateSymbolsTable(CodeDescription* desc,
ELF* elf,
int text_section_index) {
@@ -754,14 +1039,42 @@ static void CreateSymbolsTable(CodeDescription* desc,
ELFSymbol::TYPE_FUNC,
text_section_index));
}
+#endif // defined(__ELF)
-class DebugInfoSection : public ELFSection {
+class DebugInfoSection : public DebugSection {
public:
explicit DebugInfoSection(CodeDescription* desc)
- : ELFSection(".debug_info", TYPE_PROGBITS, 1), desc_(desc) { }
+#if defined(__ELF)
+ : ELFSection(".debug_info", TYPE_PROGBITS, 1),
+#else
+ : MachOSection("__debug_info",
+ "__DWARF",
+ 1,
+ MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
+#endif
+ desc_(desc) { }
+
+ // DWARF2 standard
+ enum DWARF2LocationOp {
+ DW_OP_reg0 = 0x50,
+ DW_OP_reg1 = 0x51,
+ DW_OP_reg2 = 0x52,
+ DW_OP_reg3 = 0x53,
+ DW_OP_reg4 = 0x54,
+ DW_OP_reg5 = 0x55,
+ DW_OP_reg6 = 0x56,
+ DW_OP_reg7 = 0x57,
+ DW_OP_fbreg = 0x91 // 1 param: SLEB128 offset
+ };
+
+ enum DWARF2Encoding {
+ DW_ATE_ADDRESS = 0x1,
+ DW_ATE_SIGNED = 0x5
+ };
bool WriteBody(Writer* w) {
+ uintptr_t cu_start = w->position();
Writer::Slot<uint32_t> size = w->CreateSlotHere<uint32_t>();
uintptr_t start = w->position();
w->Write<uint16_t>(2); // DWARF version.
@@ -773,6 +1086,123 @@ class DebugInfoSection : public ELFSection {
w->Write<intptr_t>(desc_->CodeStart());
w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
w->Write<uint32_t>(0);
+
+ uint32_t ty_offset = static_cast<uint32_t>(w->position() - cu_start);
+ w->WriteULEB128(3);
+ w->Write<uint8_t>(kPointerSize);
+ w->WriteString("v8value");
+
+ if (desc_->IsInfoAvailable()) {
+ CompilationInfo* info = desc_->info();
+ ScopeInfo<FreeStoreAllocationPolicy> scope_info(info->scope());
+ w->WriteULEB128(2);
+ w->WriteString(desc_->name());
+ w->Write<intptr_t>(desc_->CodeStart());
+ w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
+ Writer::Slot<uint32_t> fb_block_size = w->CreateSlotHere<uint32_t>();
+ uintptr_t fb_block_start = w->position();
+#if defined(V8_TARGET_ARCH_IA32)
+ w->Write<uint8_t>(DW_OP_reg5); // The frame pointer's here on ia32
+#elif defined(V8_TARGET_ARCH_X64)
+ w->Write<uint8_t>(DW_OP_reg6); // and here on x64.
+#else
+#error Unsupported target architecture.
+#endif
+ fb_block_size.set(static_cast<uint32_t>(w->position() - fb_block_start));
+
+ int params = scope_info.number_of_parameters();
+ int slots = scope_info.number_of_stack_slots();
+ int context_slots = scope_info.number_of_context_slots();
+ // The real slot ID is internal_slots + context_slot_id.
+ int internal_slots = Context::MIN_CONTEXT_SLOTS;
+ int locals = scope_info.NumberOfLocals();
+ int current_abbreviation = 4;
+
+ for (int param = 0; param < params; ++param) {
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString(
+ *scope_info.parameter_name(param)->ToCString(DISALLOW_NULLS));
+ w->Write<uint32_t>(ty_offset);
+ Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
+ uintptr_t block_start = w->position();
+ w->Write<uint8_t>(DW_OP_fbreg);
+ w->WriteSLEB128(
+ JavaScriptFrameConstants::kLastParameterOffset +
+ kPointerSize * (params - param - 1));
+ block_size.set(static_cast<uint32_t>(w->position() - block_start));
+ }
+
+ EmbeddedVector<char, 256> buffer;
+ StringBuilder builder(buffer.start(), buffer.length());
+
+ for (int slot = 0; slot < slots; ++slot) {
+ w->WriteULEB128(current_abbreviation++);
+ builder.Reset();
+ builder.AddFormatted("slot%d", slot);
+ w->WriteString(builder.Finalize());
+ }
+
+ // See contexts.h for more information.
+ ASSERT(Context::MIN_CONTEXT_SLOTS == 4);
+ ASSERT(Context::CLOSURE_INDEX == 0);
+ ASSERT(Context::PREVIOUS_INDEX == 1);
+ ASSERT(Context::EXTENSION_INDEX == 2);
+ ASSERT(Context::GLOBAL_INDEX == 3);
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString(".closure");
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString(".previous");
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString(".extension");
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString(".global");
+
+ for (int context_slot = 0;
+ context_slot < context_slots;
+ ++context_slot) {
+ w->WriteULEB128(current_abbreviation++);
+ builder.Reset();
+ builder.AddFormatted("context_slot%d", context_slot + internal_slots);
+ w->WriteString(builder.Finalize());
+ }
+
+ for (int local = 0; local < locals; ++local) {
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString(
+ *scope_info.LocalName(local)->ToCString(DISALLOW_NULLS));
+ w->Write<uint32_t>(ty_offset);
+ Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
+ uintptr_t block_start = w->position();
+ w->Write<uint8_t>(DW_OP_fbreg);
+ w->WriteSLEB128(
+ JavaScriptFrameConstants::kLocal0Offset -
+ kPointerSize * local);
+ block_size.set(static_cast<uint32_t>(w->position() - block_start));
+ }
+
+ {
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString("__function");
+ w->Write<uint32_t>(ty_offset);
+ Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
+ uintptr_t block_start = w->position();
+ w->Write<uint8_t>(DW_OP_fbreg);
+ w->WriteSLEB128(JavaScriptFrameConstants::kFunctionOffset);
+ block_size.set(static_cast<uint32_t>(w->position() - block_start));
+ }
+
+ {
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString("__context");
+ w->Write<uint32_t>(ty_offset);
+ Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
+ uintptr_t block_start = w->position();
+ w->Write<uint8_t>(DW_OP_fbreg);
+ w->WriteSLEB128(StandardFrameConstants::kContextOffset);
+ block_size.set(static_cast<uint32_t>(w->position() - block_start));
+ }
+ }
+
size.set(static_cast<uint32_t>(w->position() - start));
return true;
}
@@ -782,13 +1212,28 @@ class DebugInfoSection : public ELFSection {
};
-class DebugAbbrevSection : public ELFSection {
+class DebugAbbrevSection : public DebugSection {
public:
- DebugAbbrevSection() : ELFSection(".debug_abbrev", TYPE_PROGBITS, 1) { }
+ explicit DebugAbbrevSection(CodeDescription* desc)
+#ifdef __ELF
+ : ELFSection(".debug_abbrev", TYPE_PROGBITS, 1),
+#else
+ : MachOSection("__debug_abbrev",
+ "__DWARF",
+ 1,
+ MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
+#endif
+ desc_(desc) { }
// DWARF2 standard, figure 14.
enum DWARF2Tags {
- DW_TAG_COMPILE_UNIT = 0x11
+ DW_TAG_FORMAL_PARAMETER = 0x05,
+ DW_TAG_POINTER_TYPE = 0xf,
+ DW_TAG_COMPILE_UNIT = 0x11,
+ DW_TAG_STRUCTURE_TYPE = 0x13,
+ DW_TAG_BASE_TYPE = 0x24,
+ DW_TAG_SUBPROGRAM = 0x2e,
+ DW_TAG_VARIABLE = 0x34
};
// DWARF2 standard, figure 16.
@@ -799,23 +1244,55 @@ class DebugAbbrevSection : public ELFSection {
// DWARF standard, figure 17.
enum DWARF2Attribute {
+ DW_AT_LOCATION = 0x2,
DW_AT_NAME = 0x3,
+ DW_AT_BYTE_SIZE = 0xb,
DW_AT_STMT_LIST = 0x10,
DW_AT_LOW_PC = 0x11,
- DW_AT_HIGH_PC = 0x12
+ DW_AT_HIGH_PC = 0x12,
+ DW_AT_ENCODING = 0x3e,
+ DW_AT_FRAME_BASE = 0x40,
+ DW_AT_TYPE = 0x49
};
// DWARF2 standard, figure 19.
enum DWARF2AttributeForm {
DW_FORM_ADDR = 0x1,
+ DW_FORM_BLOCK4 = 0x4,
DW_FORM_STRING = 0x8,
- DW_FORM_DATA4 = 0x6
+ DW_FORM_DATA4 = 0x6,
+ DW_FORM_BLOCK = 0x9,
+ DW_FORM_DATA1 = 0xb,
+ DW_FORM_FLAG = 0xc,
+ DW_FORM_REF4 = 0x13
};
+ void WriteVariableAbbreviation(Writer* w,
+ int abbreviation_code,
+ bool has_value,
+ bool is_parameter) {
+ w->WriteULEB128(abbreviation_code);
+ w->WriteULEB128(is_parameter ? DW_TAG_FORMAL_PARAMETER : DW_TAG_VARIABLE);
+ w->Write<uint8_t>(DW_CHILDREN_NO);
+ w->WriteULEB128(DW_AT_NAME);
+ w->WriteULEB128(DW_FORM_STRING);
+ if (has_value) {
+ w->WriteULEB128(DW_AT_TYPE);
+ w->WriteULEB128(DW_FORM_REF4);
+ w->WriteULEB128(DW_AT_LOCATION);
+ w->WriteULEB128(DW_FORM_BLOCK4);
+ }
+ w->WriteULEB128(0);
+ w->WriteULEB128(0);
+ }
+
bool WriteBody(Writer* w) {
- w->WriteULEB128(1);
+ int current_abbreviation = 1;
+ bool extra_info = desc_->IsInfoAvailable();
+ ASSERT(desc_->IsLineInfoAvailable());
+ w->WriteULEB128(current_abbreviation++);
w->WriteULEB128(DW_TAG_COMPILE_UNIT);
- w->Write<uint8_t>(DW_CHILDREN_NO);
+ w->Write<uint8_t>(extra_info ? DW_CHILDREN_YES : DW_CHILDREN_NO);
w->WriteULEB128(DW_AT_NAME);
w->WriteULEB128(DW_FORM_STRING);
w->WriteULEB128(DW_AT_LOW_PC);
@@ -826,16 +1303,101 @@ class DebugAbbrevSection : public ELFSection {
w->WriteULEB128(DW_FORM_DATA4);
w->WriteULEB128(0);
w->WriteULEB128(0);
- w->WriteULEB128(0);
+
+ if (extra_info) {
+ CompilationInfo* info = desc_->info();
+ ScopeInfo<FreeStoreAllocationPolicy> scope_info(info->scope());
+ int params = scope_info.number_of_parameters();
+ int slots = scope_info.number_of_stack_slots();
+ int context_slots = scope_info.number_of_context_slots();
+ // The real slot ID is internal_slots + context_slot_id.
+ int internal_slots = Context::MIN_CONTEXT_SLOTS;
+ int locals = scope_info.NumberOfLocals();
+ int total_children =
+ params + slots + context_slots + internal_slots + locals + 2;
+
+ // The extra duplication below seems to be necessary to keep
+ // gdb from getting upset on OSX.
+ w->WriteULEB128(current_abbreviation++); // Abbreviation code.
+ w->WriteULEB128(DW_TAG_SUBPROGRAM);
+ w->Write<uint8_t>(
+ total_children != 0 ? DW_CHILDREN_YES : DW_CHILDREN_NO);
+ w->WriteULEB128(DW_AT_NAME);
+ w->WriteULEB128(DW_FORM_STRING);
+ w->WriteULEB128(DW_AT_LOW_PC);
+ w->WriteULEB128(DW_FORM_ADDR);
+ w->WriteULEB128(DW_AT_HIGH_PC);
+ w->WriteULEB128(DW_FORM_ADDR);
+ w->WriteULEB128(DW_AT_FRAME_BASE);
+ w->WriteULEB128(DW_FORM_BLOCK4);
+ w->WriteULEB128(0);
+ w->WriteULEB128(0);
+
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteULEB128(DW_TAG_STRUCTURE_TYPE);
+ w->Write<uint8_t>(DW_CHILDREN_NO);
+ w->WriteULEB128(DW_AT_BYTE_SIZE);
+ w->WriteULEB128(DW_FORM_DATA1);
+ w->WriteULEB128(DW_AT_NAME);
+ w->WriteULEB128(DW_FORM_STRING);
+ w->WriteULEB128(0);
+ w->WriteULEB128(0);
+
+ for (int param = 0; param < params; ++param) {
+ WriteVariableAbbreviation(w, current_abbreviation++, true, true);
+ }
+
+ for (int slot = 0; slot < slots; ++slot) {
+ WriteVariableAbbreviation(w, current_abbreviation++, false, false);
+ }
+
+ for (int internal_slot = 0;
+ internal_slot < internal_slots;
+ ++internal_slot) {
+ WriteVariableAbbreviation(w, current_abbreviation++, false, false);
+ }
+
+ for (int context_slot = 0;
+ context_slot < context_slots;
+ ++context_slot) {
+ WriteVariableAbbreviation(w, current_abbreviation++, false, false);
+ }
+
+ for (int local = 0; local < locals; ++local) {
+ WriteVariableAbbreviation(w, current_abbreviation++, true, false);
+ }
+
+ // The function.
+ WriteVariableAbbreviation(w, current_abbreviation++, true, false);
+
+ // The context.
+ WriteVariableAbbreviation(w, current_abbreviation++, true, false);
+
+ if (total_children != 0) {
+ w->WriteULEB128(0); // Terminate the sibling list.
+ }
+ }
+
+ w->WriteULEB128(0); // Terminate the table.
return true;
}
+
+ private:
+ CodeDescription* desc_;
};
-class DebugLineSection : public ELFSection {
+class DebugLineSection : public DebugSection {
public:
explicit DebugLineSection(CodeDescription* desc)
+#ifdef __ELF
: ELFSection(".debug_line", TYPE_PROGBITS, 1),
+#else
+ : MachOSection("__debug_line",
+ "__DWARF",
+ 1,
+ MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
+#endif
desc_(desc) { }
// DWARF2 standard, figure 34.
@@ -992,8 +1554,7 @@ class DebugLineSection : public ELFSection {
#ifdef V8_TARGET_ARCH_X64
-
-class UnwindInfoSection : public ELFSection {
+class UnwindInfoSection : public DebugSection {
public:
explicit UnwindInfoSection(CodeDescription *desc);
virtual bool WriteBody(Writer *w);
@@ -1079,8 +1640,13 @@ void UnwindInfoSection::WriteLength(Writer *w,
UnwindInfoSection::UnwindInfoSection(CodeDescription *desc)
- : ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1), desc_(desc)
-{ }
+#ifdef __ELF
+ : ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1),
+#else
+ : MachOSection("__eh_frame", "__TEXT", sizeof(uintptr_t),
+ MachOSection::S_REGULAR),
+#endif
+ desc_(desc) { }
int UnwindInfoSection::WriteCIE(Writer *w) {
Writer::Slot<uint32_t> cie_length_slot = w->CreateSlotHere<uint32_t>();
@@ -1212,15 +1778,14 @@ bool UnwindInfoSection::WriteBody(Writer *w) {
#endif // V8_TARGET_ARCH_X64
-
-static void CreateDWARFSections(CodeDescription* desc, ELF* elf) {
+static void CreateDWARFSections(CodeDescription* desc, DebugObject* obj) {
if (desc->IsLineInfoAvailable()) {
- elf->AddSection(new DebugInfoSection(desc));
- elf->AddSection(new DebugAbbrevSection);
- elf->AddSection(new DebugLineSection(desc));
+ obj->AddSection(new DebugInfoSection(desc));
+ obj->AddSection(new DebugAbbrevSection(desc));
+ obj->AddSection(new DebugLineSection(desc));
}
#ifdef V8_TARGET_ARCH_X64
- elf->AddSection(new UnwindInfoSection(desc));
+ obj->AddSection(new UnwindInfoSection(desc));
#endif
}
@@ -1260,6 +1825,13 @@ extern "C" {
// Static initialization is necessary to prevent GDB from seeing
// uninitialized descriptor.
JITDescriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
+
+#ifdef OBJECT_PRINT
+ void __gdb_print_v8_object(MaybeObject* object) {
+ object->Print();
+ fprintf(stdout, "\n");
+ }
+#endif
}
@@ -1283,17 +1855,23 @@ static void DestroyCodeEntry(JITCodeEntry* entry) {
}
-static void RegisterCodeEntry(JITCodeEntry* entry) {
+static void RegisterCodeEntry(JITCodeEntry* entry,
+ bool dump_if_enabled,
+ const char* name_hint) {
#if defined(DEBUG) && !defined(WIN32)
static int file_num = 0;
- if (FLAG_gdbjit_dump) {
+ if (FLAG_gdbjit_dump && dump_if_enabled) {
static const int kMaxFileNameSize = 64;
static const char* kElfFilePrefix = "/tmp/elfdump";
static const char* kObjFileExt = ".o";
char file_name[64];
- OS::SNPrintF(Vector<char>(file_name, kMaxFileNameSize), "%s%d%s",
- kElfFilePrefix, file_num++, kObjFileExt);
+ OS::SNPrintF(Vector<char>(file_name, kMaxFileNameSize),
+ "%s%s%d%s",
+ kElfFilePrefix,
+ (name_hint != NULL) ? name_hint : "",
+ file_num++,
+ kObjFileExt);
WriteBytes(file_name, entry->symfile_addr_, entry->symfile_size_);
}
#endif
@@ -1327,7 +1905,18 @@ static void UnregisterCodeEntry(JITCodeEntry* entry) {
static JITCodeEntry* CreateELFObject(CodeDescription* desc) {
ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
+#ifdef __MACH_O
+ MachO mach_o;
+ Writer w(&mach_o);
+
+ mach_o.AddSection(new MachOTextSection(kCodeAlignment,
+ desc->CodeStart(),
+ desc->CodeSize()));
+
+ CreateDWARFSections(desc, &mach_o);
+ mach_o.Write(&w, desc->CodeStart(), desc->CodeSize());
+#else
ELF elf;
Writer w(&elf);
@@ -1345,6 +1934,7 @@ static JITCodeEntry* CreateELFObject(CodeDescription* desc) {
CreateDWARFSections(desc, &elf);
elf.Write(&w);
+#endif
return CreateCodeEntry(w.buffer(), w.position());
}
@@ -1393,7 +1983,8 @@ static GDBJITLineInfo* UntagLineInfo(void* ptr) {
void GDBJITInterface::AddCode(Handle<String> name,
Handle<Script> script,
- Handle<Code> code) {
+ Handle<Code> code,
+ CompilationInfo* info) {
if (!FLAG_gdbjit) return;
// Force initialization of line_ends array.
@@ -1401,9 +1992,9 @@ void GDBJITInterface::AddCode(Handle<String> name,
if (!name.is_null()) {
SmartPointer<char> name_cstring = name->ToCString(DISALLOW_NULLS);
- AddCode(*name_cstring, *code, GDBJITInterface::FUNCTION, *script);
+ AddCode(*name_cstring, *code, GDBJITInterface::FUNCTION, *script, info);
} else {
- AddCode("", *code, GDBJITInterface::FUNCTION, *script);
+ AddCode("", *code, GDBJITInterface::FUNCTION, *script, info);
}
}
@@ -1450,7 +2041,8 @@ Mutex* GDBJITInterface::mutex_ = OS::CreateMutex();
void GDBJITInterface::AddCode(const char* name,
Code* code,
GDBJITInterface::CodeTag tag,
- Script* script) {
+ Script* script,
+ CompilationInfo* info) {
if (!FLAG_gdbjit) return;
ScopedLock lock(mutex_);
@@ -1465,7 +2057,8 @@ void GDBJITInterface::AddCode(const char* name,
script != NULL ? Handle<Script>(script)
: Handle<Script>(),
lineinfo,
- tag);
+ tag,
+ info);
if (!FLAG_gdbjit_full && !code_desc.IsLineInfoAvailable()) {
delete lineinfo;
@@ -1480,7 +2073,18 @@ void GDBJITInterface::AddCode(const char* name,
delete lineinfo;
e->value = entry;
- RegisterCodeEntry(entry);
+ const char* name_hint = NULL;
+ bool should_dump = false;
+ if (FLAG_gdbjit_dump) {
+ if (strlen(FLAG_gdbjit_dump_filter) == 0) {
+ name_hint = name;
+ should_dump = true;
+ } else if (name != NULL) {
+ name_hint = strstr(name, FLAG_gdbjit_dump_filter);
+ should_dump = (name_hint != NULL);
+ }
+ }
+ RegisterCodeEntry(entry, should_dump, name_hint);
}
@@ -1500,7 +2104,7 @@ void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,
builder.AddFormatted(": code object %p", static_cast<void*>(code));
}
- AddCode(builder.Finalize(), code, tag);
+ AddCode(builder.Finalize(), code, tag, NULL, NULL);
}
diff --git a/deps/v8/src/gdb-jit.h b/deps/v8/src/gdb-jit.h
index 0c80fb65b..2cf15bc61 100644
--- a/deps/v8/src/gdb-jit.h
+++ b/deps/v8/src/gdb-jit.h
@@ -43,6 +43,8 @@
namespace v8 {
namespace internal {
+class CompilationInfo;
+
#define CODE_TAGS_LIST(V) \
V(LOAD_IC) \
V(KEYED_LOAD_IC) \
@@ -113,11 +115,13 @@ class GDBJITInterface: public AllStatic {
static void AddCode(const char* name,
Code* code,
CodeTag tag,
- Script* script = NULL);
+ Script* script,
+ CompilationInfo* info);
static void AddCode(Handle<String> name,
Handle<Script> script,
- Handle<Code> code);
+ Handle<Code> code,
+ CompilationInfo* info);
static void AddCode(CodeTag tag, String* name, Code* code);
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 1891ef5ca..d8cc74257 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -214,9 +214,10 @@ void NormalizeProperties(Handle<JSObject> object,
}
-void NormalizeElements(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->NormalizeElements());
+Handle<NumberDictionary> NormalizeElements(Handle<JSObject> object) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->NormalizeElements(),
+ NumberDictionary);
}
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 5c64cf501..13c6dd67f 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -170,7 +170,7 @@ class HandleScope {
void NormalizeProperties(Handle<JSObject> object,
PropertyNormalizationMode mode,
int expected_additional_properties);
-void NormalizeElements(Handle<JSObject> object);
+Handle<NumberDictionary> NormalizeElements(Handle<JSObject> object);
void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields);
MUST_USE_RESULT Handle<NumberDictionary> NumberDictionarySet(
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 6bb0206cf..2aca68ddb 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -33,6 +33,7 @@
#include "codegen.h"
#include "compilation-cache.h"
#include "debug.h"
+#include "deoptimizer.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "liveobjectlist-inl.h"
@@ -4664,6 +4665,9 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
#ifdef ENABLE_DEBUGGER_SUPPORT
isolate_->debug()->Iterate(v);
+ if (isolate_->deoptimizer_data() != NULL) {
+ isolate_->deoptimizer_data()->Iterate(v);
+ }
#endif
v->Synchronize("debug");
isolate_->compilation_cache()->Iterate(v);
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index 771770ee8..c8db9a024 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -669,7 +669,7 @@ void HCallRuntime::PrintDataTo(StringStream* stream) {
}
-void HClassOfTest::PrintDataTo(StringStream* stream) {
+void HClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("class_of_test(");
value()->PrintNameTo(stream);
stream->Add(", \"%o\")", *class_name());
@@ -747,7 +747,7 @@ void HUnaryOperation::PrintDataTo(StringStream* stream) {
}
-void HHasInstanceType::PrintDataTo(StringStream* stream) {
+void HHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
switch (from_) {
case FIRST_JS_RECEIVER_TYPE:
@@ -768,7 +768,7 @@ void HHasInstanceType::PrintDataTo(StringStream* stream) {
}
-void HTypeofIs::PrintDataTo(StringStream* stream) {
+void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" == ");
stream->Add(type_literal_->ToAsciiVector());
@@ -1231,25 +1231,28 @@ Range* HShl::InferRange() {
-void HCompare::PrintDataTo(StringStream* stream) {
+void HCompareGeneric::PrintDataTo(StringStream* stream) {
stream->Add(Token::Name(token()));
stream->Add(" ");
HBinaryOperation::PrintDataTo(stream);
}
-void HCompare::SetInputRepresentation(Representation r) {
+void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add(Token::Name(token()));
+ stream->Add(" ");
+ left()->PrintNameTo(stream);
+ stream->Add(" ");
+ right()->PrintNameTo(stream);
+}
+
+
+void HCompareIDAndBranch::SetInputRepresentation(Representation r) {
input_representation_ = r;
- if (r.IsTagged()) {
- SetAllSideEffects();
- ClearFlag(kUseGVN);
- } else if (r.IsDouble()) {
+ if (r.IsDouble()) {
SetFlag(kDeoptimizeOnUndefined);
- ClearAllSideEffects();
- SetFlag(kUseGVN);
} else {
- ClearAllSideEffects();
- SetFlag(kUseGVN);
+ ASSERT(r.IsInteger32());
}
}
@@ -1566,17 +1569,7 @@ HType HConstant::CalculateInferredType() {
}
-HType HCompare::CalculateInferredType() {
- return HType::Boolean();
-}
-
-
-HType HCompareObjectEq::CalculateInferredType() {
- return HType::Boolean();
-}
-
-
-HType HUnaryPredicate::CalculateInferredType() {
+HType HCompareGeneric::CalculateInferredType() {
return HType::Boolean();
}
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index a0cab6aed..8ee841937 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -72,6 +72,7 @@ class LChunkBuilder;
V(BitXor) \
V(BlockEntry) \
V(BoundsCheck) \
+ V(Branch) \
V(CallConstantFunction) \
V(CallFunction) \
V(CallGlobal) \
@@ -89,11 +90,12 @@ class LChunkBuilder;
V(CheckPrototypeMaps) \
V(CheckSmi) \
V(ClampToUint8) \
- V(ClassOfTest) \
- V(Compare) \
- V(CompareObjectEq) \
+ V(ClassOfTestAndBranch) \
+ V(CompareIDAndBranch) \
+ V(CompareGeneric) \
+ V(CompareObjectEqAndBranch) \
V(CompareMap) \
- V(CompareConstantEq) \
+ V(CompareConstantEqAndBranch) \
V(Constant) \
V(Context) \
V(DeleteProperty) \
@@ -109,17 +111,17 @@ class LChunkBuilder;
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
- V(HasCachedArrayIndex) \
- V(HasInstanceType) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InvokeFunction) \
- V(IsConstructCall) \
- V(IsNull) \
- V(IsObject) \
- V(IsSmi) \
- V(IsUndetectable) \
+ V(IsConstructCallAndBranch) \
+ V(IsNullAndBranch) \
+ V(IsObjectAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(LeaveInlined) \
V(LoadContextSlot) \
@@ -163,13 +165,12 @@ class LChunkBuilder;
V(StringCharFromCode) \
V(StringLength) \
V(Sub) \
- V(Test) \
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(ToInt32) \
V(Typeof) \
- V(TypeofIs) \
+ V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(UseConst) \
@@ -781,6 +782,7 @@ class HControlInstruction: public HInstruction {
public:
virtual HBasicBlock* SuccessorAt(int i) = 0;
virtual int SuccessorCount() = 0;
+ virtual void SetSuccessorAt(int i, HBasicBlock* block) = 0;
virtual void PrintDataTo(StringStream* stream);
@@ -815,12 +817,13 @@ class HTemplateControlInstruction: public HControlInstruction {
public:
int SuccessorCount() { return S; }
HBasicBlock* SuccessorAt(int i) { return successors_[i]; }
+ void SetSuccessorAt(int i, HBasicBlock* block) { successors_[i] = block; }
int OperandCount() { return V; }
HValue* OperandAt(int i) { return inputs_[i]; }
+
protected:
- void SetSuccessorAt(int i, HBasicBlock* block) { successors_[i] = block; }
void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
private:
@@ -869,6 +872,9 @@ class HDeoptimize: public HControlInstruction {
UNREACHABLE();
return NULL;
}
+ virtual void SetSuccessorAt(int i, HBasicBlock* block) {
+ UNREACHABLE();
+ }
void AddEnvironmentValue(HValue* value) {
values_.Add(NULL);
@@ -922,18 +928,21 @@ class HUnaryControlInstruction: public HTemplateControlInstruction<2, 1> {
};
-class HTest: public HUnaryControlInstruction {
+class HBranch: public HUnaryControlInstruction {
public:
- HTest(HValue* value, HBasicBlock* true_target, HBasicBlock* false_target)
+ HBranch(HValue* value, HBasicBlock* true_target, HBasicBlock* false_target)
: HUnaryControlInstruction(value, true_target, false_target) {
ASSERT(true_target != NULL && false_target != NULL);
}
+ explicit HBranch(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) { }
+
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(Test)
+ DECLARE_CONCRETE_INSTRUCTION(Branch)
};
@@ -2520,43 +2529,58 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
};
-class HCompare: public HBinaryOperation {
+class HCompareGeneric: public HBinaryOperation {
public:
- HCompare(HValue* left, HValue* right, Token::Value token)
+ HCompareGeneric(HValue* left, HValue* right, Token::Value token)
: HBinaryOperation(left, right), token_(token) {
ASSERT(Token::IsCompareOp(token));
set_representation(Representation::Tagged());
SetAllSideEffects();
}
- void SetInputRepresentation(Representation r);
-
- virtual bool EmitAtUses() {
- return !HasSideEffects() && !HasMultipleUses();
- }
-
virtual Representation RequiredInputRepresentation(int index) const {
- return input_representation_;
+ return Representation::Tagged();
}
Representation GetInputRepresentation() const {
- return input_representation_;
+ return Representation::Tagged();
}
+
Token::Value token() const { return token_; }
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
- virtual intptr_t Hashcode() {
- return HValue::Hashcode() * 7 + token_;
+ DECLARE_CONCRETE_INSTRUCTION(CompareGeneric)
+
+ private:
+ Token::Value token_;
+};
+
+
+class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> {
+ public:
+ HCompareIDAndBranch(HValue* left, HValue* right, Token::Value token)
+ : token_(token) {
+ ASSERT(Token::IsCompareOp(token));
+ SetOperandAt(0, left);
+ SetOperandAt(1, right);
}
- DECLARE_CONCRETE_INSTRUCTION(Compare)
+ HValue* left() { return OperandAt(0); }
+ HValue* right() { return OperandAt(1); }
+ Token::Value token() const { return token_; }
- protected:
- virtual bool DataEquals(HValue* other) {
- HCompare* comp = HCompare::cast(other);
- return token_ == comp->token();
+ void SetInputRepresentation(Representation r);
+ Representation GetInputRepresentation() const {
+ return input_representation_;
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return input_representation_;
}
+ virtual void PrintDataTo(StringStream* stream);
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareIDAndBranch)
private:
Representation input_representation_;
@@ -2564,61 +2588,39 @@ class HCompare: public HBinaryOperation {
};
-class HCompareObjectEq: public HBinaryOperation {
+class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> {
public:
- HCompareObjectEq(HValue* left, HValue* right)
- : HBinaryOperation(left, right) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
+ HCompareObjectEqAndBranch(HValue* left, HValue* right) {
+ SetOperandAt(0, left);
+ SetOperandAt(1, right);
}
- virtual bool EmitAtUses() {
- return !HasSideEffects() && !HasMultipleUses();
- }
+ HValue* left() { return OperandAt(0); }
+ HValue* right() { return OperandAt(1); }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- virtual HType CalculateInferredType();
- DECLARE_CONCRETE_INSTRUCTION(CompareObjectEq)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ DECLARE_CONCRETE_INSTRUCTION(CompareObjectEqAndBranch)
};
-class HCompareConstantEq: public HUnaryOperation {
+class HCompareConstantEqAndBranch: public HUnaryControlInstruction {
public:
- HCompareConstantEq(HValue* left, int right, Token::Value op)
- : HUnaryOperation(left), op_(op), right_(right) {
+ HCompareConstantEqAndBranch(HValue* left, int right, Token::Value op)
+ : HUnaryControlInstruction(left, NULL, NULL), op_(op), right_(right) {
ASSERT(op == Token::EQ_STRICT);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
}
Token::Value op() const { return op_; }
int right() const { return right_; }
- virtual bool EmitAtUses() {
- return !HasSideEffects() && !HasMultipleUses();
- }
-
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Integer32();
}
- virtual HType CalculateInferredType() { return HType::Boolean(); }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareConstantEq);
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HCompareConstantEq* other_instr = HCompareConstantEq::cast(other);
- return (op_ == other_instr->op_ &&
- right_ == other_instr->right_);
- }
+ DECLARE_CONCRETE_INSTRUCTION(CompareConstantEqAndBranch);
private:
const Token::Value op_;
@@ -2626,139 +2628,112 @@ class HCompareConstantEq: public HUnaryOperation {
};
-class HUnaryPredicate: public HUnaryOperation {
+class HIsNullAndBranch: public HUnaryControlInstruction {
public:
- explicit HUnaryPredicate(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
+ HIsNullAndBranch(HValue* value, bool is_strict)
+ : HUnaryControlInstruction(value, NULL, NULL), is_strict_(is_strict) { }
- virtual bool EmitAtUses() {
- return !HasSideEffects() && !HasMultipleUses();
- }
+ bool is_strict() const { return is_strict_; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- virtual HType CalculateInferredType();
-};
-
-
-class HIsNull: public HUnaryPredicate {
- public:
- HIsNull(HValue* value, bool is_strict)
- : HUnaryPredicate(value), is_strict_(is_strict) { }
-
- bool is_strict() const { return is_strict_; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNull)
- protected:
- virtual bool DataEquals(HValue* other) {
- HIsNull* b = HIsNull::cast(other);
- return is_strict_ == b->is_strict();
- }
+ DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch)
private:
bool is_strict_;
};
-class HIsObject: public HUnaryPredicate {
+class HIsObjectAndBranch: public HUnaryControlInstruction {
public:
- explicit HIsObject(HValue* value) : HUnaryPredicate(value) { }
+ explicit HIsObjectAndBranch(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) { }
- DECLARE_CONCRETE_INSTRUCTION(IsObject)
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
};
-class HIsSmi: public HUnaryPredicate {
+class HIsSmiAndBranch: public HUnaryControlInstruction {
public:
- explicit HIsSmi(HValue* value) : HUnaryPredicate(value) { }
+ explicit HIsSmiAndBranch(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) { }
- DECLARE_CONCRETE_INSTRUCTION(IsSmi)
+ DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
protected:
virtual bool DataEquals(HValue* other) { return true; }
};
-class HIsUndetectable: public HUnaryPredicate {
+class HIsUndetectableAndBranch: public HUnaryControlInstruction {
public:
- explicit HIsUndetectable(HValue* value) : HUnaryPredicate(value) { }
+ explicit HIsUndetectableAndBranch(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) { }
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectable)
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
};
-class HIsConstructCall: public HTemplateInstruction<0> {
+class HIsConstructCallAndBranch: public HTemplateControlInstruction<2, 0> {
public:
- HIsConstructCall() {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual bool EmitAtUses() {
- return !HasSideEffects() && !HasMultipleUses();
- }
-
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCall)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch)
};
-class HHasInstanceType: public HUnaryPredicate {
+class HHasInstanceTypeAndBranch: public HUnaryControlInstruction {
public:
- HHasInstanceType(HValue* value, InstanceType type)
- : HUnaryPredicate(value), from_(type), to_(type) { }
- HHasInstanceType(HValue* value, InstanceType from, InstanceType to)
- : HUnaryPredicate(value), from_(from), to_(to) {
+ HHasInstanceTypeAndBranch(HValue* value, InstanceType type)
+ : HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { }
+ HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to)
+ : HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) {
ASSERT(to == LAST_TYPE); // Others not implemented yet in backend.
}
InstanceType from() { return from_; }
InstanceType to() { return to_; }
- virtual bool EmitAtUses() {
- return !HasSideEffects() && !HasMultipleUses();
- }
-
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceType)
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HHasInstanceType* b = HHasInstanceType::cast(other);
- return (from_ == b->from()) && (to_ == b->to());
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
}
+ DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch)
+
private:
InstanceType from_;
InstanceType to_; // Inclusive range, not all combinations work.
};
-class HHasCachedArrayIndex: public HUnaryPredicate {
+class HHasCachedArrayIndexAndBranch: public HUnaryControlInstruction {
public:
- explicit HHasCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
+ explicit HHasCachedArrayIndexAndBranch(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) { }
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex)
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch)
};
@@ -2780,42 +2755,40 @@ class HGetCachedArrayIndex: public HUnaryOperation {
};
-class HClassOfTest: public HUnaryPredicate {
+class HClassOfTestAndBranch: public HUnaryControlInstruction {
public:
- HClassOfTest(HValue* value, Handle<String> class_name)
- : HUnaryPredicate(value), class_name_(class_name) { }
+ HClassOfTestAndBranch(HValue* value, Handle<String> class_name)
+ : HUnaryControlInstruction(value, NULL, NULL),
+ class_name_(class_name) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTest)
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
virtual void PrintDataTo(StringStream* stream);
Handle<String> class_name() const { return class_name_; }
- protected:
- virtual bool DataEquals(HValue* other) {
- HClassOfTest* b = HClassOfTest::cast(other);
- return class_name_.is_identical_to(b->class_name_);
- }
-
private:
Handle<String> class_name_;
};
-class HTypeofIs: public HUnaryPredicate {
+class HTypeofIsAndBranch: public HUnaryControlInstruction {
public:
- HTypeofIs(HValue* value, Handle<String> type_literal)
- : HUnaryPredicate(value), type_literal_(type_literal) { }
+ HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
+ : HUnaryControlInstruction(value, NULL, NULL),
+ type_literal_(type_literal) { }
Handle<String> type_literal() { return type_literal_; }
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(TypeofIs)
+ DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
- protected:
- virtual bool DataEquals(HValue* other) {
- HTypeofIs* b = HTypeofIs::cast(other);
- return type_literal_.is_identical_to(b->type_literal_);
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
}
private:
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 38676624c..dff96b4f7 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -33,6 +33,7 @@
#include "hashmap.h"
#include "lithium-allocator.h"
#include "parser.h"
+#include "scopeinfo.h"
#include "scopes.h"
#include "stub-cache.h"
@@ -886,9 +887,8 @@ class HRangeAnalysis BASE_EMBEDDED {
private:
void TraceRange(const char* msg, ...);
void Analyze(HBasicBlock* block);
- void InferControlFlowRange(HTest* test, HBasicBlock* dest);
- void InferControlFlowRange(Token::Value op, HValue* value, HValue* other);
- void InferPhiRange(HPhi* phi);
+ void InferControlFlowRange(HCompareIDAndBranch* test, HBasicBlock* dest);
+ void UpdateControlFlowRange(Token::Value op, HValue* value, HValue* other);
void InferRange(HValue* value);
void RollBackTo(int index);
void AddRange(HValue* value, Range* range);
@@ -922,15 +922,15 @@ void HRangeAnalysis::Analyze(HBasicBlock* block) {
// Infer range based on control flow.
if (block->predecessors()->length() == 1) {
HBasicBlock* pred = block->predecessors()->first();
- if (pred->end()->IsTest()) {
- InferControlFlowRange(HTest::cast(pred->end()), block);
+ if (pred->end()->IsCompareIDAndBranch()) {
+ InferControlFlowRange(HCompareIDAndBranch::cast(pred->end()), block);
}
}
// Process phi instructions.
for (int i = 0; i < block->phis()->length(); ++i) {
HPhi* phi = block->phis()->at(i);
- InferPhiRange(phi);
+ InferRange(phi);
}
// Go through all instructions of the current block.
@@ -949,28 +949,26 @@ void HRangeAnalysis::Analyze(HBasicBlock* block) {
}
-void HRangeAnalysis::InferControlFlowRange(HTest* test, HBasicBlock* dest) {
+void HRangeAnalysis::InferControlFlowRange(HCompareIDAndBranch* test,
+ HBasicBlock* dest) {
ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
- if (test->value()->IsCompare()) {
- HCompare* compare = HCompare::cast(test->value());
- if (compare->GetInputRepresentation().IsInteger32()) {
- Token::Value op = compare->token();
- if (test->SecondSuccessor() == dest) {
- op = Token::NegateCompareOp(op);
- }
- Token::Value inverted_op = Token::InvertCompareOp(op);
- InferControlFlowRange(op, compare->left(), compare->right());
- InferControlFlowRange(inverted_op, compare->right(), compare->left());
+ if (test->GetInputRepresentation().IsInteger32()) {
+ Token::Value op = test->token();
+ if (test->SecondSuccessor() == dest) {
+ op = Token::NegateCompareOp(op);
}
+ Token::Value inverted_op = Token::InvertCompareOp(op);
+ UpdateControlFlowRange(op, test->left(), test->right());
+ UpdateControlFlowRange(inverted_op, test->right(), test->left());
}
}
// We know that value [op] other. Use this information to update the range on
// value.
-void HRangeAnalysis::InferControlFlowRange(Token::Value op,
- HValue* value,
- HValue* other) {
+void HRangeAnalysis::UpdateControlFlowRange(Token::Value op,
+ HValue* value,
+ HValue* other) {
Range temp_range;
Range* range = other->range() != NULL ? other->range() : &temp_range;
Range* new_range = NULL;
@@ -1001,12 +999,6 @@ void HRangeAnalysis::InferControlFlowRange(Token::Value op,
}
-void HRangeAnalysis::InferPhiRange(HPhi* phi) {
- // TODO(twuerthinger): Infer loop phi ranges.
- InferRange(phi);
-}
-
-
void HRangeAnalysis::InferRange(HValue* value) {
ASSERT(!value->HasRange());
if (!value->representation().IsNone()) {
@@ -1940,7 +1932,7 @@ void HGraph::MarkDeoptimizeOnUndefined() {
HPhase phase("MarkDeoptimizeOnUndefined", this);
// Compute DeoptimizeOnUndefined flag for phis.
// Any phi that can reach a use with DeoptimizeOnUndefined set must
- // have DeoptimizeOnUndefined set. Currently only HCompare, with
+ // have DeoptimizeOnUndefined set. Currently only HCompareIDAndBranch, with
// double input representation, has this flag set.
// The flag is used by HChange tagged->double, which must deoptimize
// if one of its uses has this flag set.
@@ -2078,14 +2070,28 @@ void TestContext::ReturnValue(HValue* value) {
void EffectContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+ ASSERT(!instr->IsControlInstruction());
owner()->AddInstruction(instr);
if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
}
+void EffectContext::ReturnControl(HControlInstruction* instr, int ast_id) {
+ ASSERT(!instr->HasSideEffects());
+ HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
+ HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
+ instr->SetSuccessorAt(0, empty_true);
+ instr->SetSuccessorAt(1, empty_false);
+ owner()->current_block()->Finish(instr);
+ HBasicBlock* join = owner()->CreateJoin(empty_true, empty_false, ast_id);
+ owner()->set_current_block(join);
+}
+
+
void ValueContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+ ASSERT(!instr->IsControlInstruction());
if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
- owner()->Bailout("bad value context for arguments object value");
+ return owner()->Bailout("bad value context for arguments object value");
}
owner()->AddInstruction(instr);
owner()->Push(instr);
@@ -2093,7 +2099,28 @@ void ValueContext::ReturnInstruction(HInstruction* instr, int ast_id) {
}
+void ValueContext::ReturnControl(HControlInstruction* instr, int ast_id) {
+ ASSERT(!instr->HasSideEffects());
+ if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
+ return owner()->Bailout("bad value context for arguments object value");
+ }
+ HBasicBlock* materialize_false = owner()->graph()->CreateBasicBlock();
+ HBasicBlock* materialize_true = owner()->graph()->CreateBasicBlock();
+ instr->SetSuccessorAt(0, materialize_true);
+ instr->SetSuccessorAt(1, materialize_false);
+ owner()->current_block()->Finish(instr);
+ owner()->set_current_block(materialize_true);
+ owner()->Push(owner()->graph()->GetConstantTrue());
+ owner()->set_current_block(materialize_false);
+ owner()->Push(owner()->graph()->GetConstantFalse());
+ HBasicBlock* join =
+ owner()->CreateJoin(materialize_true, materialize_false, ast_id);
+ owner()->set_current_block(join);
+}
+
+
void TestContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+ ASSERT(!instr->IsControlInstruction());
HGraphBuilder* builder = owner();
builder->AddInstruction(instr);
// We expect a simulate after every expression with side effects, though
@@ -2107,18 +2134,31 @@ void TestContext::ReturnInstruction(HInstruction* instr, int ast_id) {
}
+void TestContext::ReturnControl(HControlInstruction* instr, int ast_id) {
+ ASSERT(!instr->HasSideEffects());
+ HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
+ HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
+ instr->SetSuccessorAt(0, empty_true);
+ instr->SetSuccessorAt(1, empty_false);
+ owner()->current_block()->Finish(instr);
+ empty_true->Goto(if_true());
+ empty_false->Goto(if_false());
+ owner()->set_current_block(NULL);
+}
+
+
void TestContext::BuildBranch(HValue* value) {
// We expect the graph to be in edge-split form: there is no edge that
// connects a branch node to a join node. We conservatively ensure that
// property by always adding an empty block on the outgoing edges of this
// branch.
HGraphBuilder* builder = owner();
- if (value->CheckFlag(HValue::kIsArguments)) {
+ if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
builder->Bailout("arguments object value in a test context");
}
HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
- HTest* test = new(zone()) HTest(value, empty_true, empty_false);
+ HBranch* test = new(zone()) HBranch(value, empty_true, empty_false);
builder->current_block()->Finish(test);
empty_true->Goto(if_true());
@@ -2624,15 +2664,16 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
// Otherwise generate a compare and branch.
CHECK_ALIVE(VisitForValue(clause->label()));
HValue* label_value = Pop();
- HCompare* compare =
- new(zone()) HCompare(tag_value, label_value, Token::EQ_STRICT);
+ HCompareIDAndBranch* compare =
+ new(zone()) HCompareIDAndBranch(tag_value,
+ label_value,
+ Token::EQ_STRICT);
compare->SetInputRepresentation(Representation::Integer32());
- ASSERT(!compare->HasSideEffects());
- AddInstruction(compare);
HBasicBlock* body_block = graph()->CreateBasicBlock();
HBasicBlock* next_test_block = graph()->CreateBasicBlock();
- HTest* branch = new(zone()) HTest(compare, body_block, next_test_block);
- current_block()->Finish(branch);
+ compare->SetSuccessorAt(0, body_block);
+ compare->SetSuccessorAt(1, next_test_block);
+ current_block()->Finish(compare);
set_current_block(next_test_block);
}
@@ -2718,7 +2759,7 @@ void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
HBasicBlock* osr_entry = graph()->CreateBasicBlock();
HValue* true_value = graph()->GetConstantTrue();
- HTest* test = new(zone()) HTest(true_value, non_osr_entry, osr_entry);
+ HBranch* test = new(zone()) HBranch(true_value, non_osr_entry, osr_entry);
current_block()->Finish(test);
HBasicBlock* loop_predecessor = graph()->CreateBasicBlock();
@@ -2962,7 +3003,7 @@ void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
if (HasStackOverflow()) return;
HFunctionLiteral* instr =
new(zone()) HFunctionLiteral(shared_info, expr->pretenure());
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -3007,7 +3048,7 @@ void HGraphBuilder::VisitConditional(Conditional* expr) {
HBasicBlock* join = CreateJoin(cond_true, cond_false, expr->id());
set_current_block(join);
if (join != NULL && !ast_context()->IsEffect()) {
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
}
}
}
@@ -3057,7 +3098,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
value == graph()->GetConstantHole()) {
return Bailout("reference to uninitialized const variable");
}
- ast_context()->ReturnValue(value);
+ return ast_context()->ReturnValue(value);
} else if (variable->IsContextSlot()) {
if (variable->mode() == Variable::CONST) {
return Bailout("reference to const context slot");
@@ -3065,7 +3106,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
HValue* context = BuildContextChainWalk(variable);
int index = variable->AsSlot()->index();
HLoadContextSlot* instr = new(zone()) HLoadContextSlot(context, index);
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
} else if (variable->is_global()) {
LookupResult lookup;
GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, false);
@@ -3080,7 +3121,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
HLoadGlobalCell* instr = new(zone()) HLoadGlobalCell(cell, check_hole);
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
} else {
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
@@ -3092,7 +3133,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
ast_context()->is_for_typeof());
instr->set_position(expr->position());
ASSERT(instr->HasSideEffects());
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
} else {
return Bailout("reference to a variable which requires dynamic lookup");
@@ -3106,7 +3147,7 @@ void HGraphBuilder::VisitLiteral(Literal* expr) {
ASSERT(current_block()->HasPredecessor());
HConstant* instr =
new(zone()) HConstant(expr->handle(), Representation::Tagged());
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -3117,7 +3158,7 @@ void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
HRegExpLiteral* instr = new(zone()) HRegExpLiteral(expr->pattern(),
expr->flags(),
expr->literal_index());
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -3187,9 +3228,9 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// (e.g. because of code motion).
HToFastProperties* result = new(zone()) HToFastProperties(Pop());
AddInstruction(result);
- ast_context()->ReturnValue(result);
+ return ast_context()->ReturnValue(result);
} else {
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
}
}
@@ -3233,7 +3274,7 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
AddSimulate(expr->GetIdForElement(i));
}
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
}
@@ -3395,15 +3436,14 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
Drop(1);
}
}
- ast_context()->ReturnValue(value);
- return;
+ return ast_context()->ReturnValue(value);
}
}
ASSERT(join != NULL);
join->SetJoinId(expr->id());
set_current_block(join);
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ if (!ast_context()->IsEffect()) return ast_context()->ReturnValue(Pop());
}
@@ -3455,14 +3495,13 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
Push(value);
ASSERT(has_side_effects); // Stores always have side effects.
AddSimulate(expr->AssignmentId());
- ast_context()->ReturnValue(Pop());
- return;
+ return ast_context()->ReturnValue(Pop());
}
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
}
@@ -3550,7 +3589,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
} else {
return Bailout("compound assignment to lookup slot");
}
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
} else if (prop != NULL) {
prop->RecordTypeFeedback(oracle());
@@ -3585,7 +3624,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Drop(2);
Push(instr);
if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
} else {
// Keyed property.
@@ -3622,7 +3661,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Push(instr);
ASSERT(has_side_effects); // Stores always have side effects.
AddSimulate(expr->AssignmentId());
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
}
} else {
@@ -3669,7 +3708,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
CHECK_ALIVE(VisitForValue(expr->value(), ARGUMENTS_ALLOWED));
HValue* value = Pop();
Bind(var, value);
- ast_context()->ReturnValue(value);
+ return ast_context()->ReturnValue(value);
} else if (var->IsContextSlot()) {
ASSERT(var->mode() != Variable::CONST);
@@ -3694,7 +3733,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
new(zone()) HStoreContextSlot(context, index, Top());
AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
} else if (var->is_global()) {
CHECK_ALIVE(VisitForValue(expr->value()));
@@ -3702,7 +3741,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
Top(),
expr->position(),
expr->AssignmentId());
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
} else {
return Bailout("assignment to LOOKUP or const CONTEXT variable");
@@ -3916,8 +3955,6 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
todo_external_array = true;
}
}
- // We can't treat dictionary elements here (need to deopt instead).
- type_todo[JSObject::DICTIONARY_ELEMENTS] = false;
// Support for FAST_DOUBLE_ELEMENTS isn't implemented yet, so we deopt.
type_todo[JSObject::FAST_DOUBLE_ELEMENTS] = false;
@@ -3935,9 +3972,12 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
for (JSObject::ElementsKind elements_kind = JSObject::FAST_ELEMENTS;
elements_kind <= JSObject::LAST_ELEMENTS_KIND;
elements_kind = JSObject::ElementsKind(elements_kind + 1)) {
- // After having handled FAST_ELEMENTS in the first run of the loop, we
- // need to add some code that's executed for all other cases.
- if (elements_kind == 1 && todo_external_array) {
+ // After having handled FAST_ELEMENTS and DICTIONARY_ELEMENTS, we
+ // need to add some code that's executed for all external array cases.
+ STATIC_ASSERT(JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
+ JSObject::LAST_ELEMENTS_KIND);
+ if (elements_kind == JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
+ && todo_external_array) {
elements = AddInstruction(new(zone()) HLoadElements(object));
// We need to forcibly prevent some ElementsKind-dependent instructions
// from being hoisted out of any loops they might occur in, because
@@ -3955,23 +3995,24 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
if (type_todo[elements_kind]) {
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareConstantEq* compare = new(zone()) HCompareConstantEq(
- elements_kind_instr,
- elements_kind,
- Token::EQ_STRICT);
- AddInstruction(compare);
- HTest* branch = new(zone()) HTest(compare, if_true, if_false);
- current_block()->Finish(branch);
+ HCompareConstantEqAndBranch* compare =
+ new(zone()) HCompareConstantEqAndBranch(elements_kind_instr,
+ elements_kind,
+ Token::EQ_STRICT);
+ compare->SetSuccessorAt(0, if_true);
+ compare->SetSuccessorAt(1, if_false);
+ current_block()->Finish(compare);
set_current_block(if_true);
HInstruction* access;
if (elements_kind == JSObject::FAST_ELEMENTS) {
HBasicBlock* if_jsarray = graph()->CreateBasicBlock();
HBasicBlock* if_fastobject = graph()->CreateBasicBlock();
- HInstruction* typecheck =
- AddInstruction(new(zone()) HHasInstanceType(object, JS_ARRAY_TYPE));
- HTest* test = new(zone()) HTest(typecheck, if_jsarray, if_fastobject);
- current_block()->Finish(test);
+ HHasInstanceTypeAndBranch* typecheck =
+ new(zone()) HHasInstanceTypeAndBranch(object, JS_ARRAY_TYPE);
+ typecheck->SetSuccessorAt(0, if_jsarray);
+ typecheck->SetSuccessorAt(1, if_fastobject);
+ current_block()->Finish(typecheck);
set_current_block(if_jsarray);
HInstruction* length = new(zone()) HJSArrayLength(object);
@@ -4006,6 +4047,12 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
access = AddInstruction(
new(zone()) HLoadKeyedFastElement(elements, checked_key));
}
+ } else if (elements_kind == JSObject::DICTIONARY_ELEMENTS) {
+ if (is_store) {
+ access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
+ } else {
+ access = AddInstruction(BuildLoadKeyedGeneric(object, key));
+ }
} else { // External array elements.
access = AddInstruction(BuildExternalArrayElementAccess(
external_elements, checked_key, val, elements_kind, is_store));
@@ -4178,11 +4225,10 @@ void HGraphBuilder::VisitProperty(Property* expr) {
Drop(1);
}
}
- ast_context()->ReturnValue(load);
- return;
+ return ast_context()->ReturnValue(load);
}
instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -4270,8 +4316,7 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
if (!ast_context()->IsEffect()) Push(call);
current_block()->Goto(join);
} else {
- ast_context()->ReturnInstruction(call, expr->id());
- return;
+ return ast_context()->ReturnInstruction(call, expr->id());
}
}
@@ -4282,7 +4327,7 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
if (join->HasPredecessor()) {
set_current_block(join);
join->SetJoinId(expr->id());
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ if (!ast_context()->IsEffect()) return ast_context()->ReturnValue(Pop());
} else {
set_current_block(NULL);
}
@@ -4430,6 +4475,13 @@ bool HGraphBuilder::TryInline(Call* expr) {
TraceInline(target, caller, "could not generate deoptimization info");
return false;
}
+ if (target_shared->scope_info() == SerializedScopeInfo::Empty()) {
+ // The scope info might not have been set if a lazily compiled
+ // function is inlined before being called for the first time.
+ Handle<SerializedScopeInfo> target_scope_info =
+ SerializedScopeInfo::Create(target_info.scope());
+ target_shared->set_scope_info(*target_scope_info);
+ }
target_shared->EnableDeoptimizationSupport(*target_info.code());
Compiler::RecordFunctionCompilation(Logger::FUNCTION_TAG,
&target_info,
@@ -4496,7 +4548,7 @@ bool HGraphBuilder::TryInline(Call* expr) {
// TODO(3168478): refactor to avoid this.
HBasicBlock* empty_true = graph()->CreateBasicBlock();
HBasicBlock* empty_false = graph()->CreateBasicBlock();
- HTest* test = new(zone()) HTest(undefined, empty_true, empty_false);
+ HBranch* test = new(zone()) HBranch(undefined, empty_true, empty_false);
current_block()->Finish(test);
empty_true->Goto(inlined_test_context()->if_true());
@@ -4709,8 +4761,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
call = new(zone()) HCallKeyed(context, key, argument_count);
call->set_position(expr->position());
Drop(argument_count + 1); // 1 is the key.
- ast_context()->ReturnInstruction(call, expr->id());
- return;
+ return ast_context()->ReturnInstruction(call, expr->id());
}
// Named function call.
@@ -4834,7 +4885,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
}
call->set_position(expr->position());
- ast_context()->ReturnInstruction(call, expr->id());
+ return ast_context()->ReturnInstruction(call, expr->id());
}
@@ -4856,7 +4907,7 @@ void HGraphBuilder::VisitCallNew(CallNew* expr) {
HCallNew* call = new(zone()) HCallNew(context, constructor, arg_count);
call->set_position(expr->position());
Drop(arg_count);
- ast_context()->ReturnInstruction(call, expr->id());
+ return ast_context()->ReturnInstruction(call, expr->id());
}
@@ -4908,7 +4959,7 @@ void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
new(zone()) HCallRuntime(name, function, argument_count);
call->set_position(RelocInfo::kNoPosition);
Drop(argument_count);
- ast_context()->ReturnInstruction(call, expr->id());
+ return ast_context()->ReturnInstruction(call, expr->id());
}
}
@@ -4936,26 +4987,26 @@ void HGraphBuilder::VisitDelete(UnaryOperation* expr) {
// Result of deleting non-property, non-variable reference is true.
// Evaluate the subexpression for side effects.
CHECK_ALIVE(VisitForEffect(expr->expression()));
- ast_context()->ReturnValue(graph()->GetConstantTrue());
+ return ast_context()->ReturnValue(graph()->GetConstantTrue());
} else if (var != NULL &&
!var->is_global() &&
var->AsSlot() != NULL &&
var->AsSlot()->type() != Slot::LOOKUP) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
- ast_context()->ReturnValue(graph()->GetConstantFalse());
+ return ast_context()->ReturnValue(graph()->GetConstantFalse());
} else if (prop != NULL) {
if (prop->is_synthetic()) {
// Result of deleting parameters is false, even when they rewrite
// to accesses on the arguments object.
- ast_context()->ReturnValue(graph()->GetConstantFalse());
+ return ast_context()->ReturnValue(graph()->GetConstantFalse());
} else {
CHECK_ALIVE(VisitForValue(prop->obj()));
CHECK_ALIVE(VisitForValue(prop->key()));
HValue* key = Pop();
HValue* obj = Pop();
HDeleteProperty* instr = new(zone()) HDeleteProperty(obj, key);
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
} else if (var->is_global()) {
Bailout("delete with global variable");
@@ -4967,14 +5018,15 @@ void HGraphBuilder::VisitDelete(UnaryOperation* expr) {
void HGraphBuilder::VisitVoid(UnaryOperation* expr) {
CHECK_ALIVE(VisitForEffect(expr->expression()));
- ast_context()->ReturnValue(graph()->GetConstantUndefined());
+ return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
void HGraphBuilder::VisitTypeof(UnaryOperation* expr) {
CHECK_ALIVE(VisitForTypeOf(expr->expression()));
HValue* value = Pop();
- ast_context()->ReturnInstruction(new(zone()) HTypeof(value), expr->id());
+ return ast_context()->ReturnInstruction(new(zone()) HTypeof(value),
+ expr->id());
}
@@ -4982,7 +5034,7 @@ void HGraphBuilder::VisitAdd(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
HInstruction* instr = new(zone()) HMul(value, graph_->GetConstant1());
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -4999,7 +5051,7 @@ void HGraphBuilder::VisitSub(UnaryOperation* expr) {
Representation rep = ToRepresentation(info);
TraceRepresentation(expr->op(), info, instr, rep);
instr->AssumeRepresentation(rep);
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -5012,7 +5064,7 @@ void HGraphBuilder::VisitBitNot(UnaryOperation* expr) {
current_block()->MarkAsDeoptimizing();
}
HInstruction* instr = new(zone()) HBitNot(value);
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -5057,7 +5109,7 @@ void HGraphBuilder::VisitNot(UnaryOperation* expr) {
HBasicBlock* join =
CreateJoin(materialize_false, materialize_true, expr->id());
set_current_block(join);
- if (join != NULL) ast_context()->ReturnValue(Pop());
+ if (join != NULL) return ast_context()->ReturnValue(Pop());
}
@@ -5232,7 +5284,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
}
Drop(returns_original_input ? 2 : 1);
- ast_context()->ReturnValue(expr->is_postfix() ? input : after);
+ return ast_context()->ReturnValue(expr->is_postfix() ? input : after);
}
@@ -5392,9 +5444,9 @@ void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
// We need an extra block to maintain edge-split form.
HBasicBlock* empty_block = graph()->CreateBasicBlock();
HBasicBlock* eval_right = graph()->CreateBasicBlock();
- HTest* test = is_logical_and
- ? new(zone()) HTest(Top(), eval_right, empty_block)
- : new(zone()) HTest(Top(), empty_block, eval_right);
+ HBranch* test = is_logical_and
+ ? new(zone()) HBranch(Top(), eval_right, empty_block)
+ : new(zone()) HBranch(Top(), empty_block, eval_right);
current_block()->Finish(test);
set_current_block(eval_right);
@@ -5404,7 +5456,7 @@ void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
HBasicBlock* join_block =
CreateJoin(empty_block, current_block(), expr->id());
set_current_block(join_block);
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
} else {
ASSERT(ast_context()->IsEffect());
@@ -5456,7 +5508,7 @@ void HGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
HValue* left = Pop();
HInstruction* instr = BuildBinaryOperation(expr, left, right);
instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -5494,9 +5546,9 @@ void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* compare_expr,
Handle<String> check) {
CHECK_ALIVE(VisitForTypeOf(expr));
HValue* expr_value = Pop();
- HInstruction* instr = new(zone()) HTypeofIs(expr_value, check);
+ HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(expr_value, check);
instr->set_position(compare_expr->position());
- ast_context()->ReturnInstruction(instr, compare_expr->id());
+ return ast_context()->ReturnControl(instr, compare_expr->id());
}
@@ -5505,10 +5557,10 @@ void HGraphBuilder::HandleLiteralCompareUndefined(
CHECK_ALIVE(VisitForValue(expr));
HValue* lhs = Pop();
HValue* rhs = graph()->GetConstantUndefined();
- HInstruction* instr =
- new(zone()) HCompareObjectEq(lhs, rhs);
+ HCompareObjectEqAndBranch* instr =
+ new(zone()) HCompareObjectEqAndBranch(lhs, rhs);
instr->set_position(compare_expr->position());
- ast_context()->ReturnInstruction(instr, compare_expr->id());
+ return ast_context()->ReturnControl(instr, compare_expr->id());
}
@@ -5518,14 +5570,15 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(current_block()->HasPredecessor());
if (IsClassOfTest(expr)) {
CallRuntime* call = expr->left()->AsCallRuntime();
+ ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
Literal* literal = expr->right()->AsLiteral();
Handle<String> rhs = Handle<String>::cast(literal->handle());
- HInstruction* instr = new(zone()) HClassOfTest(value, rhs);
+ HClassOfTestAndBranch* instr =
+ new(zone()) HClassOfTestAndBranch(value, rhs);
instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
- return;
+ return ast_context()->ReturnControl(instr, expr->id());
}
// Check for special cases that compare against literals.
@@ -5556,7 +5609,6 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
HValue* left = Pop();
Token::Value op = expr->op();
- HInstruction* instr = NULL;
if (op == Token::INSTANCEOF) {
// Check to see if the rhs of the instanceof is a global function not
// residing in new space. If it is we assume that the function will stay the
@@ -5587,13 +5639,20 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
// assumed to stay the same for this instanceof.
if (target.is_null()) {
HValue* context = environment()->LookupContext();
- instr = new(zone()) HInstanceOf(context, left, right);
+ HInstanceOf* result = new(zone()) HInstanceOf(context, left, right);
+ result->set_position(expr->position());
+ return ast_context()->ReturnInstruction(result, expr->id());
} else {
AddInstruction(new(zone()) HCheckFunction(right, target));
- instr = new(zone()) HInstanceOfKnownGlobal(left, target);
+ HInstanceOfKnownGlobal* result =
+ new(zone()) HInstanceOfKnownGlobal(left, target);
+ result->set_position(expr->position());
+ return ast_context()->ReturnInstruction(result, expr->id());
}
} else if (op == Token::IN) {
- instr = new(zone()) HIn(left, right);
+ HIn* result = new(zone()) HIn(left, right);
+ result->set_position(expr->position());
+ return ast_context()->ReturnInstruction(result, expr->id());
} else if (type_info.IsNonPrimitive()) {
switch (op) {
case Token::EQ:
@@ -5602,12 +5661,13 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
AddInstruction(new(zone()) HCheckNonSmi(right));
AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
- instr = new(zone()) HCompareObjectEq(left, right);
- break;
+ HCompareObjectEqAndBranch* result =
+ new(zone()) HCompareObjectEqAndBranch(left, right);
+ result->set_position(expr->position());
+ return ast_context()->ReturnControl(result, expr->id());
}
default:
return Bailout("Unsupported non-primitive compare");
- break;
}
} else if (type_info.IsString() && oracle()->IsSymbolCompare(expr) &&
(op == Token::EQ || op == Token::EQ_STRICT)) {
@@ -5615,15 +5675,24 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
AddInstruction(HCheckInstanceType::NewIsSymbol(left));
AddInstruction(new(zone()) HCheckNonSmi(right));
AddInstruction(HCheckInstanceType::NewIsSymbol(right));
- instr = new(zone()) HCompareObjectEq(left, right);
+ HCompareObjectEqAndBranch* result =
+ new(zone()) HCompareObjectEqAndBranch(left, right);
+ result->set_position(expr->position());
+ return ast_context()->ReturnControl(result, expr->id());
} else {
- HCompare* compare = new(zone()) HCompare(left, right, op);
Representation r = ToRepresentation(type_info);
- compare->SetInputRepresentation(r);
- instr = compare;
+ if (r.IsTagged()) {
+ HCompareGeneric* result = new(zone()) HCompareGeneric(left, right, op);
+ result->set_position(expr->position());
+ return ast_context()->ReturnInstruction(result, expr->id());
+ } else {
+ HCompareIDAndBranch* result =
+ new(zone()) HCompareIDAndBranch(left, right, op);
+ result->set_position(expr->position());
+ result->SetInputRepresentation(r);
+ return ast_context()->ReturnControl(result, expr->id());
+ }
}
- instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -5632,10 +5701,10 @@ void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
CHECK_ALIVE(VisitForValue(expr->expression()));
-
HValue* value = Pop();
- HIsNull* compare = new(zone()) HIsNull(value, expr->is_strict());
- ast_context()->ReturnInstruction(compare, expr->id());
+ HIsNullAndBranch* instr =
+ new(zone()) HIsNullAndBranch(value, expr->is_strict());
+ return ast_context()->ReturnControl(instr, expr->id());
}
@@ -5668,8 +5737,8 @@ void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsSmi* result = new(zone()) HIsSmi(value);
- ast_context()->ReturnInstruction(result, call->id());
+ HIsSmiAndBranch* result = new(zone()) HIsSmiAndBranch(value);
+ return ast_context()->ReturnControl(result, call->id());
}
@@ -5677,11 +5746,11 @@ void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HHasInstanceType* result =
- new(zone()) HHasInstanceType(value,
- FIRST_SPEC_OBJECT_TYPE,
- LAST_SPEC_OBJECT_TYPE);
- ast_context()->ReturnInstruction(result, call->id());
+ HHasInstanceTypeAndBranch* result =
+ new(zone()) HHasInstanceTypeAndBranch(value,
+ FIRST_SPEC_OBJECT_TYPE,
+ LAST_SPEC_OBJECT_TYPE);
+ return ast_context()->ReturnControl(result, call->id());
}
@@ -5689,9 +5758,9 @@ void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HHasInstanceType* result =
- new(zone()) HHasInstanceType(value, JS_FUNCTION_TYPE);
- ast_context()->ReturnInstruction(result, call->id());
+ HHasInstanceTypeAndBranch* result =
+ new(zone()) HHasInstanceTypeAndBranch(value, JS_FUNCTION_TYPE);
+ return ast_context()->ReturnControl(result, call->id());
}
@@ -5699,8 +5768,9 @@ void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HHasCachedArrayIndex* result = new(zone()) HHasCachedArrayIndex(value);
- ast_context()->ReturnInstruction(result, call->id());
+ HHasCachedArrayIndexAndBranch* result =
+ new(zone()) HHasCachedArrayIndexAndBranch(value);
+ return ast_context()->ReturnControl(result, call->id());
}
@@ -5708,8 +5778,9 @@ void HGraphBuilder::GenerateIsArray(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HHasInstanceType* result = new(zone()) HHasInstanceType(value, JS_ARRAY_TYPE);
- ast_context()->ReturnInstruction(result, call->id());
+ HHasInstanceTypeAndBranch* result =
+ new(zone()) HHasInstanceTypeAndBranch(value, JS_ARRAY_TYPE);
+ return ast_context()->ReturnControl(result, call->id());
}
@@ -5717,9 +5788,9 @@ void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HHasInstanceType* result =
- new(zone()) HHasInstanceType(value, JS_REGEXP_TYPE);
- ast_context()->ReturnInstruction(result, call->id());
+ HHasInstanceTypeAndBranch* result =
+ new(zone()) HHasInstanceTypeAndBranch(value, JS_REGEXP_TYPE);
+ return ast_context()->ReturnControl(result, call->id());
}
@@ -5727,8 +5798,8 @@ void HGraphBuilder::GenerateIsObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsObject* test = new(zone()) HIsObject(value);
- ast_context()->ReturnInstruction(test, call->id());
+ HIsObjectAndBranch* result = new(zone()) HIsObjectAndBranch(value);
+ return ast_context()->ReturnControl(result, call->id());
}
@@ -5741,8 +5812,9 @@ void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- ast_context()->ReturnInstruction(new(zone()) HIsUndetectable(value),
- call->id());
+ HIsUndetectableAndBranch* result =
+ new(zone()) HIsUndetectableAndBranch(value);
+ return ast_context()->ReturnControl(result, call->id());
}
@@ -5760,9 +5832,10 @@ void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
// We are generating graph for inlined function. Currently
// constructor inlining is not supported and we can just return
// false from %_IsConstructCall().
- ast_context()->ReturnValue(graph()->GetConstantFalse());
+ return ast_context()->ReturnValue(graph()->GetConstantFalse());
} else {
- ast_context()->ReturnInstruction(new(zone()) HIsConstructCall, call->id());
+ return ast_context()->ReturnControl(new(zone()) HIsConstructCallAndBranch,
+ call->id());
}
}
@@ -5776,7 +5849,7 @@ void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
ASSERT(call->arguments()->length() == 0);
HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
HArgumentsLength* result = new(zone()) HArgumentsLength(elements);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5792,7 +5865,7 @@ void HGraphBuilder::GenerateArguments(CallRuntime* call) {
HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
HAccessArgumentsAt* result =
new(zone()) HAccessArgumentsAt(elements, length, index);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5809,7 +5882,7 @@ void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HValueOf* result = new(zone()) HValueOf(value);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5826,7 +5899,7 @@ void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
HValue* index = Pop();
HValue* string = Pop();
HStringCharCodeAt* result = BuildStringCharCodeAt(string, index);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5836,7 +5909,7 @@ void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* char_code = Pop();
HStringCharFromCode* result = new(zone()) HStringCharFromCode(char_code);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5850,7 +5923,7 @@ void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
AddInstruction(char_code);
HStringCharFromCode* result = new(zone()) HStringCharFromCode(char_code);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5861,14 +5934,15 @@ void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
- HCompareObjectEq* result = new(zone()) HCompareObjectEq(left, right);
- ast_context()->ReturnInstruction(result, call->id());
+ HCompareObjectEqAndBranch* result =
+ new(zone()) HCompareObjectEqAndBranch(left, right);
+ return ast_context()->ReturnControl(result, call->id());
}
void HGraphBuilder::GenerateLog(CallRuntime* call) {
// %_Log is ignored in optimized code.
- ast_context()->ReturnValue(graph()->GetConstantUndefined());
+ return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
@@ -5885,7 +5959,7 @@ void HGraphBuilder::GenerateStringAdd(CallRuntime* call) {
HValue* context = environment()->LookupContext();
HCallStub* result = new(zone()) HCallStub(context, CodeStub::StringAdd, 2);
Drop(2);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5896,7 +5970,7 @@ void HGraphBuilder::GenerateSubString(CallRuntime* call) {
HValue* context = environment()->LookupContext();
HCallStub* result = new(zone()) HCallStub(context, CodeStub::SubString, 3);
Drop(3);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5908,7 +5982,7 @@ void HGraphBuilder::GenerateStringCompare(CallRuntime* call) {
HCallStub* result =
new(zone()) HCallStub(context, CodeStub::StringCompare, 2);
Drop(2);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5919,7 +5993,7 @@ void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
HValue* context = environment()->LookupContext();
HCallStub* result = new(zone()) HCallStub(context, CodeStub::RegExpExec, 4);
Drop(4);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5931,7 +6005,7 @@ void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
HCallStub* result =
new(zone()) HCallStub(context, CodeStub::RegExpConstructResult, 3);
Drop(3);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5949,7 +6023,7 @@ void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
HCallStub* result =
new(zone()) HCallStub(context, CodeStub::NumberToString, 1);
Drop(1);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5976,7 +6050,7 @@ void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
HInvokeFunction* result =
new(zone()) HInvokeFunction(context, function, arg_count);
Drop(arg_count);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5988,7 +6062,7 @@ void HGraphBuilder::GenerateMathPow(CallRuntime* call) {
HValue* right = Pop();
HValue* left = Pop();
HPower* result = new(zone()) HPower(left, right);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -6000,7 +6074,7 @@ void HGraphBuilder::GenerateMathSin(CallRuntime* call) {
new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::SIN);
Drop(1);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -6012,7 +6086,7 @@ void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::COS);
Drop(1);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -6024,7 +6098,7 @@ void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::LOG);
Drop(1);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -6044,7 +6118,7 @@ void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HGetCachedArrayIndex* result = new(zone()) HGetCachedArrayIndex(value);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -6053,6 +6127,11 @@ void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
}
+void HGraphBuilder::GenerateIsNativeOrStrictMode(CallRuntime* call) {
+ return Bailout("inlined runtime function: IsNativeOrStrictMode");
+}
+
+
#undef CHECK_BAILOUT
#undef CHECK_ALIVE
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index 9e3f316e7..bd5ef6a93 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -498,6 +498,12 @@ class AstContext {
// the instruction as value.
virtual void ReturnInstruction(HInstruction* instr, int ast_id) = 0;
+ // Finishes the current basic block and materialize a boolean for
+ // value context, nothing for effect, generate a branch for test context.
+ // Call this function in tail position in the Visit functions for
+ // expressions.
+ virtual void ReturnControl(HControlInstruction* instr, int ast_id) = 0;
+
void set_for_typeof(bool for_typeof) { for_typeof_ = for_typeof; }
bool is_for_typeof() { return for_typeof_; }
@@ -532,6 +538,7 @@ class EffectContext: public AstContext {
virtual void ReturnValue(HValue* value);
virtual void ReturnInstruction(HInstruction* instr, int ast_id);
+ virtual void ReturnControl(HControlInstruction* instr, int ast_id);
};
@@ -544,6 +551,7 @@ class ValueContext: public AstContext {
virtual void ReturnValue(HValue* value);
virtual void ReturnInstruction(HInstruction* instr, int ast_id);
+ virtual void ReturnControl(HControlInstruction* instr, int ast_id);
bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
@@ -566,6 +574,7 @@ class TestContext: public AstContext {
virtual void ReturnValue(HValue* value);
virtual void ReturnInstruction(HInstruction* instr, int ast_id);
+ virtual void ReturnControl(HControlInstruction* instr, int ast_id);
static TestContext* cast(AstContext* context) {
ASSERT(context->IsTest());
@@ -706,6 +715,10 @@ class HGraphBuilder: public AstVisitor {
void Bailout(const char* reason);
+ HBasicBlock* CreateJoin(HBasicBlock* first,
+ HBasicBlock* second,
+ int join_id);
+
private:
// Type of a member function that generates inline code for a native function.
typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
@@ -779,10 +792,6 @@ class HGraphBuilder: public AstVisitor {
HBasicBlock* loop_entry,
BreakAndContinueInfo* break_info);
- HBasicBlock* CreateJoin(HBasicBlock* first,
- HBasicBlock* second,
- int join_id);
-
// Create a back edge in the flow graph. body_exit is the predecessor
// block and loop_entry is the successor block. loop_successor is the
// block where control flow exits the loop normally (e.g., via failure of
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 6609b4fed..da38e138d 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -835,7 +835,7 @@ class Assembler : public AssemblerBase {
void call(const Operand& adr);
int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code,
- RelocInfo::Mode rmode,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId);
// Jumps
@@ -990,7 +990,9 @@ class Assembler : public AssemblerBase {
void Print();
// Check the code size generated from label to here.
- int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); }
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
// Mark address of the ExitJSFrame code.
void RecordJSReturn();
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index d97206d0b..8624ed96b 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -554,12 +554,10 @@ void UnaryOpStub::Generate(MacroAssembler* masm) {
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(ecx); // Save return address.
- __ push(eax);
- // the argument is now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ push(Immediate(Smi::FromInt(MinorKey())));
+
+ __ push(eax); // the operand
__ push(Immediate(Smi::FromInt(op_)));
+ __ push(Immediate(Smi::FromInt(mode_)));
__ push(Immediate(Smi::FromInt(operand_type_)));
__ push(ecx); // Push return address.
@@ -567,8 +565,7 @@ void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
- masm->isolate()), 4, 1);
+ ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index d51549d54..d02aa01d7 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -62,16 +62,11 @@ class TranscendentalCacheStub: public CodeStub {
class UnaryOpStub: public CodeStub {
public:
- UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
+ UnaryOpStub(Token::Value op,
+ UnaryOverwriteMode mode,
+ UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
- operand_type_(UnaryOpIC::UNINITIALIZED),
- name_(NULL) {
- }
-
- UnaryOpStub(int key, UnaryOpIC::TypeInfo operand_type)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
operand_type_(operand_type),
name_(NULL) {
}
@@ -89,8 +84,7 @@ class UnaryOpStub: public CodeStub {
#ifdef DEBUG
void Print() {
- PrintF("TypeRecordingUnaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
+ PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 72fdac8c6..4ff1bfc35 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -348,6 +348,9 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
+#ifdef DEBUG
+ output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@@ -461,6 +464,9 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
+#ifdef DEBUG
+ output_frame->SetKind(Code::FUNCTION);
+#endif
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
@@ -587,7 +593,7 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
output_frame->SetState(Smi::FromInt(state));
// Set the continuation for the topmost frame.
- if (is_topmost) {
+ if (is_topmost && bailout_type_ != DEBUGGER) {
Builtins* builtins = isolate_->builtins();
Code* continuation = (bailout_type_ == EAGER)
? builtins->builtin(Builtins::kNotifyDeoptimized)
@@ -600,6 +606,27 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
}
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ // Set the register values. The values are not important as there are no
+ // callee saved registers in JavaScript frames, so all registers are
+ // spilled. Registers ebp and esp are set to the correct values though.
+
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ input_->SetRegister(i, i * 4);
+ }
+ input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
+ }
+}
+
+
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index c341b084c..75cc4b860 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -78,16 +78,18 @@ class JumpPatchSite BASE_EMBEDDED {
}
void EmitPatchInfo() {
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
- ASSERT(is_int8(delta_to_patch_site));
- __ test(eax, Immediate(delta_to_patch_site));
+ if (patch_site_.is_bound()) {
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+ ASSERT(is_int8(delta_to_patch_site));
+ __ test(eax, Immediate(delta_to_patch_site));
#ifdef DEBUG
- info_emitted_ = true;
+ info_emitted_ = true;
#endif
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
}
- bool is_bound() const { return patch_site_.is_bound(); }
-
private:
// jc will be patched with jz, jnc will become jnz.
void EmitJump(Condition cc, Label* target, Label::Distance distance) {
@@ -121,6 +123,7 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
+ scope_ = info->scope();
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -140,7 +143,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ test(ecx, Operand(ecx));
__ j(zero, &ok, Label::kNear);
// +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
__ mov(Operand(esp, receiver_offset),
Immediate(isolate()->factory()->undefined_value()));
__ bind(&ok);
@@ -152,7 +155,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ push(edi); // Callee's JS Function.
{ Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = scope()->num_stack_slots();
+ int locals_count = info->scope()->num_stack_slots();
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
@@ -166,7 +169,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in edi.
@@ -183,7 +186,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
// Copy parameters into context if necessary.
- int num_parameters = scope()->num_parameters();
+ int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
@@ -213,11 +216,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
// Receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
__ lea(edx,
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(edx);
- __ SafePush(Immediate(Smi::FromInt(scope()->num_parameters())));
+ __ SafePush(Immediate(Smi::FromInt(num_parameters)));
// Arguments to ArgumentsAccessStub and/or New...:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
@@ -342,7 +346,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ mov(esp, ebp);
__ pop(ebp);
- int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
+ int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
__ Ret(arguments_bytes, ecx);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check that the size of the code used for returning is large enough
@@ -754,7 +758,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
}
}
}
@@ -827,7 +831,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- EmitCallIC(ic, &patch_site, clause->CompareId());
+ __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ patch_site.EmitPatchInfo();
__ test(eax, Operand(eax));
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
@@ -1120,7 +1125,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- EmitCallIC(ic, mode, AstNode::kNoNumber);
+ __ call(ic, mode);
}
@@ -1200,7 +1205,7 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
__ SafeSet(eax, Immediate(key_literal->handle()));
Handle<Code> ic =
isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ jmp(done);
}
}
@@ -1222,7 +1227,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) {
__ mov(eax, GlobalObjectOperand());
__ mov(ecx, var->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(eax);
} else if (slot->type() == Slot::LOOKUP) {
@@ -1368,7 +1373,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ __ call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1601,14 +1606,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
ASSERT(!key->handle()->IsSmi());
__ mov(ecx, Immediate(key->handle()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1629,7 +1634,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
BinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
// Smi case.
@@ -1712,8 +1718,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
OverwriteMode mode) {
__ pop(edx);
BinaryOpStub stub(op, mode);
- // NULL signals no inlined smi code.
- EmitCallIC(stub.GetCode(), NULL, expr->id());
+ JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
context()->Plug(eax);
}
@@ -1753,7 +1760,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
break;
}
case KEYED_PROPERTY: {
@@ -1776,7 +1783,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
break;
}
}
@@ -1800,7 +1807,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@@ -1893,7 +1900,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1933,7 +1940,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1984,7 +1991,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
- EmitCallIC(ic, mode, expr->id());
+ __ call(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2017,7 +2024,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(
arg_count, in_loop);
__ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize)); // Key.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2056,7 +2063,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
}
// Push the receiver of the enclosing function.
- __ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize));
+ __ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
// Push the strict mode flag.
__ push(Immediate(Smi::FromInt(strict_mode_flag())));
@@ -2193,7 +2200,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
- // for a regular property use keyed EmitCallIC.
+ // for a regular property use EmitKeyedCallWithIC.
if (prop->is_synthetic()) {
// Do not visit the object and key subexpressions (they are shared
// by all occurrences of the same rewritten parameter).
@@ -2211,7 +2218,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
// Push result (function).
__ push(eax);
// Push Global receiver.
@@ -2599,7 +2606,7 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
// parameter count in eax.
VisitForAccumulatorValue(args->at(0));
__ mov(edx, eax);
- __ SafeSet(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
+ __ SafeSet(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(eax);
@@ -2611,7 +2618,7 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
Label exit;
// Get the number of formal parameters.
- __ SafeSet(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
+ __ SafeSet(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -3527,6 +3534,39 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // Load the function into eax.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Prepare for the test.
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Test for strict mode function.
+ __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, if_true);
+
+ // Test for native function.
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, if_true);
+
+ // Not native or strict-mode function.
+ __ jmp(if_false);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -3557,7 +3597,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
arg_count, in_loop, mode);
- EmitCallIC(ic, mode, expr->id());
+ __ call(ic, mode, expr->id());
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
} else {
@@ -3696,7 +3736,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register eax.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- EmitCallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(eax);
}
@@ -3816,7 +3856,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ patch_site.EmitPatchInfo();
__ bind(&done);
// Store the value returned in eax.
@@ -3849,7 +3890,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3866,7 +3907,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -3894,7 +3935,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(eax);
} else if (proxy != NULL &&
@@ -4089,7 +4130,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- EmitCallIC(ic, &patch_site, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ test(eax, Operand(eax));
@@ -4148,58 +4190,6 @@ Register FullCodeGenerator::context_register() {
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- RelocInfo::Mode mode,
- unsigned ast_id) {
- ASSERT(mode == RelocInfo::CODE_TARGET ||
- mode == RelocInfo::CODE_TARGET_CONTEXT);
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(isolate()->counters()->named_load_full(), 1);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(isolate()->counters()->keyed_load_full(), 1);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(isolate()->counters()->named_store_full(), 1);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(isolate()->counters()->keyed_store_full(), 1);
- default:
- break;
- }
- __ call(ic, mode, ast_id);
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- JumpPatchSite* patch_site,
- unsigned ast_id) {
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1);
- default:
- break;
- }
- __ call(ic, RelocInfo::CODE_TARGET, ast_id);
- if (patch_site != NULL && patch_site->is_bound()) {
- patch_site->EmitPatchInfo();
- } else {
- __ nop(); // Signals no inlined code.
- }
-}
-
-
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ mov(Operand(ebp, frame_offset), value);
@@ -4212,19 +4202,20 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- if (scope()->is_global_scope()) {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope()) {
// Contexts nested in the global context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
__ push(Immediate(Smi::FromInt(0)));
- } else if (scope()->is_eval_scope()) {
- // Contexts created by a call to eval have the same closure as the
- // context calling eval, not the anonymous closure containing the eval
- // code. Fetch it from the context.
+ } else if (declaration_scope->is_eval_scope()) {
+ // Contexts nested inside eval code have the same closure as the context
+ // calling eval, not the anonymous closure containing the eval code.
+ // Fetch it from the context.
__ push(ContextOperand(esi, Context::CLOSURE_INDEX));
} else {
- ASSERT(scope()->is_function_scope());
+ ASSERT(declaration_scope->is_function_scope());
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
}
@@ -4236,12 +4227,12 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
void FullCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
ASSERT(!result_register().is(edx));
- __ mov(edx, Operand(esp, 0));
+ __ pop(edx);
__ sub(Operand(edx), Immediate(masm_->CodeObject()));
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
ASSERT_EQ(0, kSmiTag);
- __ add(edx, Operand(edx)); // Convert to smi.
- __ mov(Operand(esp, 0), edx);
+ __ SmiTag(edx);
+ __ push(edx);
// Store result register while executing finally block.
__ push(result_register());
}
@@ -4249,15 +4240,12 @@ void FullCodeGenerator::EnterFinallyBlock() {
void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(edx));
- // Restore result register from stack.
__ pop(result_register());
// Uncook return address.
- __ mov(edx, Operand(esp, 0));
- __ sar(edx, 1); // Convert smi to int.
+ __ pop(edx);
+ __ SmiUntag(edx);
__ add(Operand(edx), Immediate(masm_->CodeObject()));
- __ mov(Operand(esp, 0), edx);
- // And return.
- __ ret(0);
+ __ jmp(Operand(edx));
}
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 0f5820254..be5910a12 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -528,6 +528,8 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
__ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
__ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
__ cmp(key, Operand(scratch));
__ j(greater_equal, slow_case);
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 7f35310f7..080b2a0a7 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -1367,7 +1367,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Representation r = instr->hydrogen()->representation();
+ Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
Register reg = ToRegister(instr->InputAt(0));
__ test(reg, Operand(reg));
@@ -1380,7 +1380,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->InputAt(0));
- if (instr->hydrogen()->type().IsBoolean()) {
+ if (instr->hydrogen()->value()->type().IsBoolean()) {
__ cmp(reg, factory()->true_value());
EmitBranch(true_block, false_block, equal);
} else {
@@ -1474,32 +1474,6 @@ void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
}
-void LCodeGen::DoCmpID(LCmpID* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- LOperand* result = instr->result();
-
- Label unordered;
- if (instr->is_double()) {
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the unordered case, which produces a false value.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, &unordered, Label::kNear);
- } else {
- EmitCmpI(left, right);
- }
-
- Label done;
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- __ mov(ToRegister(result), factory()->true_value());
- __ j(cc, &done, Label::kNear);
-
- __ bind(&unordered);
- __ mov(ToRegister(result), factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
@@ -1520,23 +1494,9 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
}
-void LCodeGen::DoCmpObjectEq(LCmpObjectEq* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- __ cmp(left, Operand(right));
- __ mov(result, factory()->true_value());
- Label done;
- __ j(equal, &done, Label::kNear);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
+ Operand right = ToOperand(instr->InputAt(1));
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1545,19 +1505,6 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
-void LCodeGen::DoCmpConstantEq(LCmpConstantEq* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- Label done;
- __ cmp(left, instr->hydrogen()->right());
- __ mov(result, factory()->true_value());
- __ j(equal, &done, Label::kNear);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1568,43 +1515,6 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
-void LCodeGen::DoIsNull(LIsNull* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- // TODO(fsc): If the expression is known to be a smi, then it's
- // definitely not null. Materialize false.
-
- __ cmp(reg, factory()->null_value());
- if (instr->is_strict()) {
- __ mov(result, factory()->true_value());
- Label done;
- __ j(equal, &done, Label::kNear);
- __ mov(result, factory()->false_value());
- __ bind(&done);
- } else {
- Label true_value, false_value, done;
- __ j(equal, &true_value, Label::kNear);
- __ cmp(reg, factory()->undefined_value());
- __ j(equal, &true_value, Label::kNear);
- __ JumpIfSmi(reg, &false_value, Label::kNear);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = result;
- __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
- __ test(scratch, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &true_value, Label::kNear);
- __ bind(&false_value);
- __ mov(result, factory()->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ mov(result, factory()->true_value());
- __ bind(&done);
- }
-}
-
-
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
@@ -1658,25 +1568,6 @@ Condition LCodeGen::EmitIsObject(Register input,
}
-void LCodeGen::DoIsObject(LIsObject* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Label is_false, is_true, done;
-
- Condition true_cond = EmitIsObject(reg, result, &is_false, &is_true);
- __ j(true_cond, &is_true);
-
- __ bind(&is_false);
- __ mov(result, factory()->false_value());
- __ jmp(&done);
-
- __ bind(&is_true);
- __ mov(result, factory()->true_value());
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -1692,19 +1583,6 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
}
-void LCodeGen::DoIsSmi(LIsSmi* instr) {
- Operand input = ToOperand(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Label done;
- __ mov(result, factory()->true_value());
- __ JumpIfSmi(input, &done, Label::kNear);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
Operand input = ToOperand(instr->InputAt(0));
@@ -1716,26 +1594,6 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
}
-void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Label false_label, done;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(input, &false_label, Label::kNear);
- __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
- __ test_b(FieldOperand(result, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(zero, &false_label, Label::kNear);
- __ mov(result, factory()->true_value());
- __ jmp(&done);
- __ bind(&false_label);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -1752,7 +1610,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
}
-static InstanceType TestType(HHasInstanceType* instr) {
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
@@ -1761,7 +1619,7 @@ static InstanceType TestType(HHasInstanceType* instr) {
}
-static Condition BranchCondition(HHasInstanceType* instr) {
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == to) return equal;
@@ -1772,24 +1630,6 @@ static Condition BranchCondition(HHasInstanceType* instr) {
}
-void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Label done, is_false;
- __ JumpIfSmi(input, &is_false, Label::kNear);
- __ CmpObjectType(input, TestType(instr->hydrogen()), result);
- __ j(NegateCondition(BranchCondition(instr->hydrogen())),
- &is_false, Label::kNear);
- __ mov(result, factory()->true_value());
- __ jmp(&done, Label::kNear);
- __ bind(&is_false);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -1819,21 +1659,6 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
}
-void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ mov(result, factory()->true_value());
- __ test(FieldOperand(input, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- Label done;
- __ j(zero, &done, Label::kNear);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
@@ -1904,29 +1729,6 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
}
-void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- ASSERT(input.is(result));
- Register temp = ToRegister(instr->TempAt(0));
- Handle<String> class_name = instr->hydrogen()->class_name();
- Label done;
- Label is_true, is_false;
-
- EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);
-
- __ j(not_equal, &is_false, Label::kNear);
-
- __ bind(&is_true);
- __ mov(result, factory()->true_value());
- __ jmp(&done, Label::kNear);
-
- __ bind(&is_false);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -3882,14 +3684,14 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->InputAt(0);
- __ test(ToRegister(input), Immediate(kSmiTagMask));
+ __ test(ToOperand(input), Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr->environment());
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
LOperand* input = instr->InputAt(0);
- __ test(ToRegister(input), Immediate(kSmiTagMask));
+ __ test(ToOperand(input), Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr->environment());
}
@@ -3941,8 +3743,8 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
ASSERT(instr->InputAt(0)->IsRegister());
- Register reg = ToRegister(instr->InputAt(0));
- __ cmp(reg, instr->hydrogen()->target());
+ Operand operand = ToOperand(instr->InputAt(0));
+ __ cmp(operand, instr->hydrogen()->target());
DeoptimizeIf(not_equal, instr->environment());
}
@@ -4189,29 +3991,6 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
}
-void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Label true_label;
- Label false_label;
- Label done;
-
- Condition final_branch_condition = EmitTypeofIs(&true_label,
- &false_label,
- input,
- instr->type_literal());
- __ j(final_branch_condition, &true_label, Label::kNear);
- __ bind(&false_label);
- __ mov(result, factory()->false_value());
- __ jmp(&done, Label::kNear);
-
- __ bind(&true_label);
- __ mov(result, factory()->true_value());
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -4292,24 +4071,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
}
-void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
- Register result = ToRegister(instr->result());
- Label true_label;
- Label done;
-
- EmitIsConstructCall(result);
- __ j(equal, &true_label, Label::kNear);
-
- __ mov(result, factory()->false_value());
- __ jmp(&done, Label::kNear);
-
- __ bind(&true_label);
- __ mov(result, factory()->true_value());
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp = ToRegister(instr->TempAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 40e4badbe..db690479c 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -267,12 +267,6 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
}
-void LTypeofIs::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
- stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
-}
-
-
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
InputAt(0)->PrintTo(stream);
@@ -344,13 +338,6 @@ void LCallNew::PrintDataTo(StringStream* stream) {
}
-void LClassOfTest::PrintDataTo(StringStream* stream) {
- stream->Add("= class_of_test(");
- InputAt(0)->PrintTo(stream);
- stream->Add(", \"%o\")", *hydrogen()->class_name());
-}
-
-
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
@@ -985,18 +972,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (current->IsTest() && !instr->IsGoto()) {
- ASSERT(instr->IsControl());
- HTest* test = HTest::cast(current);
- instr->set_hydrogen_value(test->value());
- HBasicBlock* first = test->FirstSuccessor();
- HBasicBlock* second = test->SecondSuccessor();
- ASSERT(first != NULL && second != NULL);
- instr->SetBranchTargets(first->block_id(), second->block_id());
- } else {
- instr->set_hydrogen_value(current);
- }
-
+ instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -1041,84 +1017,17 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
}
-LInstruction* LChunkBuilder::DoTest(HTest* instr) {
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* v = instr->value();
- if (!v->EmitAtUses()) return new LBranch(UseRegisterAtStart(v));
- ASSERT(!v->HasSideEffects());
- if (v->IsClassOfTest()) {
- HClassOfTest* compare = HClassOfTest::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
- TempRegister(),
- TempRegister());
- } else if (v->IsCompare()) {
- HCompare* compare = HCompare::cast(v);
- HValue* left = compare->left();
- HValue* right = compare->right();
- Representation r = compare->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseOrConstantAtStart(right));
- } else {
- ASSERT(r.IsDouble());
- ASSERT(left->representation().IsDouble());
- ASSERT(right->representation().IsDouble());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- }
- } else if (v->IsIsSmi()) {
- HIsSmi* compare = HIsSmi::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsSmiAndBranch(Use(compare->value()));
- } else if (v->IsIsUndetectable()) {
- HIsUndetectable* compare = HIsUndetectable::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
- TempRegister());
- } else if (v->IsHasInstanceType()) {
- HHasInstanceType* compare = HHasInstanceType::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
- TempRegister());
- } else if (v->IsHasCachedArrayIndex()) {
- HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsIsNull()) {
- HIsNull* compare = HIsNull::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- // We only need a temp register for non-strict compare.
- LOperand* temp = compare->is_strict() ? NULL : TempRegister();
- return new LIsNullAndBranch(UseRegisterAtStart(compare->value()), temp);
- } else if (v->IsIsObject()) {
- HIsObject* compare = HIsObject::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- LOperand* temp = TempRegister();
- return new LIsObjectAndBranch(UseRegister(compare->value()), temp);
- } else if (v->IsCompareObjectEq()) {
- HCompareObjectEq* compare = HCompareObjectEq::cast(v);
- return new LCmpObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsCompareConstantEq()) {
- HCompareConstantEq* compare = HCompareConstantEq::cast(v);
- return new LCmpConstantEqAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsTypeofIs()) {
- HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
- } else if (v->IsIsConstructCall()) {
- return new LIsConstructCallAndBranch(TempRegister());
- } else if (v->IsConstant()) {
+ if (v->EmitAtUses()) {
+ ASSERT(v->IsConstant());
+ ASSERT(!v->representation().IsDouble());
HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
- } else {
- Abort("Undefined compare before branch");
- return NULL;
}
+ return new LBranch(UseRegisterAtStart(v));
}
@@ -1489,85 +1398,85 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
}
-LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
Token::Value op = instr->token();
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ bool reversed = (op == Token::GT || op == Token::LTE);
+ LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
+ LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
+ LCmpT* result = new LCmpT(left, right);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareIDAndBranch(
+ HCompareIDAndBranch* instr) {
Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else if (r.IsDouble()) {
+ return new LCmpIDAndBranch(left, right);
+ } else {
+ ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- bool reversed = (op == Token::GT || op == Token::LTE);
- LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
- LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
- LCmpT* result = new LCmpT(left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
+ return new LCmpIDAndBranch(left, right);
}
}
-LInstruction* LChunkBuilder::DoCompareObjectEq(HCompareObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LCmpObjectEq* result = new LCmpObjectEq(left, right);
- return DefineAsRegister(result);
+ LOperand* right = UseAtStart(instr->right());
+ return new LCmpObjectEqAndBranch(left, right);
}
-LInstruction* LChunkBuilder::DoCompareConstantEq(
- HCompareConstantEq* instr) {
- LOperand* left = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LCmpConstantEq(left));
+LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
+ HCompareConstantEqAndBranch* instr) {
+ return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsNull(value));
+LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+ // We only need a temp register for non-strict compare.
+ LOperand* temp = instr->is_strict() ? NULL : TempRegister();
+ return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
}
-LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
-
- return DefineAsRegister(new LIsObject(value));
+ LOperand* temp = TempRegister();
+ return new LIsObjectAndBranch(UseRegister(instr->value()), temp);
}
-LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseAtStart(instr->value());
-
- return DefineAsRegister(new LIsSmi(value));
+ return new LIsSmiAndBranch(Use(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsUndetectable(value));
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
+ ASSERT(instr ->value()->representation().IsTagged());
+ return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+ TempRegister());
}
-LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LHasInstanceType(value));
+ return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()),
+ TempRegister());
}
@@ -1580,20 +1489,20 @@ LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
}
-LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
- HHasCachedArrayIndex* instr) {
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
-
- return DefineAsRegister(new LHasCachedArrayIndex(value));
+ return new LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseTempRegister(instr->value());
-
- return DefineSameAsFirst(new LClassOfTest(value, TempRegister()));
+ return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+ TempRegister(),
+ TempRegister());
}
@@ -1631,7 +1540,7 @@ LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
- Use(instr->length())));
+ UseAtStart(instr->length())));
}
@@ -1724,7 +1633,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = UseAtStart(instr->value());
return AssignEnvironment(new LCheckNonSmi(value));
}
@@ -1745,13 +1654,13 @@ LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = UseAtStart(instr->value());
return AssignEnvironment(new LCheckSmi(value));
}
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = UseAtStart(instr->value());
return AssignEnvironment(new LCheckFunction(value));
}
@@ -2207,13 +2116,14 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
}
-LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
- return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
- return DefineAsRegister(new LIsConstructCall);
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new LIsConstructCallAndBranch(TempRegister());
}
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index c5667864c..5f4dd2520 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -71,15 +71,11 @@ class LCodeGen;
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
- V(ClassOfTest) \
V(ClassOfTestAndBranch) \
- V(CmpID) \
V(CmpIDAndBranch) \
- V(CmpObjectEq) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
- V(CmpConstantEq) \
V(CmpConstantEqAndBranch) \
V(ConstantD) \
V(ConstantI) \
@@ -97,9 +93,7 @@ class LCodeGen;
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
- V(HasCachedArrayIndex) \
V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
@@ -107,15 +101,10 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCall) \
V(IsConstructCallAndBranch) \
- V(IsNull) \
V(IsNullAndBranch) \
- V(IsObject) \
V(IsObjectAndBranch) \
- V(IsSmi) \
V(IsSmiAndBranch) \
- V(IsUndetectable) \
V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
@@ -167,7 +156,6 @@ class LCodeGen;
V(Throw) \
V(ToFastProperties) \
V(Typeof) \
- V(TypeofIs) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
@@ -226,7 +214,6 @@ class LInstruction: public ZoneObject {
virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
- virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
void set_environment(LEnvironment* env) { environment_ = env; }
LEnvironment* environment() const { return environment_; }
@@ -456,16 +443,15 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
virtual bool IsControl() const { return true; }
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
- void SetBranchTargets(int true_block_id, int false_block_id) {
- true_block_id_ = true_block_id;
- false_block_id_ = false_block_id;
- }
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+ int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
+ int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
private:
- int true_block_id_;
- int false_block_id_;
+ HControlInstruction* hydrogen() {
+ return HControlInstruction::cast(this->hydrogen_value());
+ }
};
@@ -567,23 +553,6 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
};
-class LCmpID: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpID(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
- }
-};
-
-
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -592,7 +561,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
@@ -617,17 +586,6 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
};
-class LCmpObjectEq: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpObjectEq(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEq, "cmp-object-eq")
-};
-
-
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -640,17 +598,6 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
};
-class LCmpConstantEq: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCmpConstantEq(LOperand* left) {
- inputs_[0] = left;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEq, "cmp-constant-eq")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
-};
-
-
class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
public:
explicit LCmpConstantEqAndBranch(LOperand* left) {
@@ -659,20 +606,7 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
"cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
-};
-
-
-class LIsNull: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsNull(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
-
- bool is_strict() const { return hydrogen()->is_strict(); }
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
};
@@ -684,7 +618,7 @@ class LIsNullAndBranch: public LControlInstruction<1, 1> {
}
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
+ DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
bool is_strict() const { return hydrogen()->is_strict(); }
@@ -692,16 +626,6 @@ class LIsNullAndBranch: public LControlInstruction<1, 1> {
};
-class LIsObject: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsObject(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
-};
-
-
class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
@@ -715,17 +639,6 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
};
-class LIsSmi: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
- DECLARE_HYDROGEN_ACCESSOR(IsSmi)
-};
-
-
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
@@ -733,22 +646,12 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsUndetectable(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
-};
-
-
class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
@@ -763,17 +666,6 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
};
-class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
-};
-
-
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
public:
LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
@@ -783,7 +675,7 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -800,17 +692,6 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
-};
-
-
class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -823,13 +704,6 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
};
-class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
- DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
-};
-
-
class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
@@ -841,20 +715,6 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
-class LClassOfTest: public LTemplateInstruction<1, 1, 1> {
- public:
- LClassOfTest(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
@@ -865,7 +725,7 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -879,7 +739,7 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1015,7 +875,7 @@ class LBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Value)
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -2034,21 +1894,6 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeofIs(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
@@ -2056,7 +1901,7 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index a80821f82..feb82684e 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -1766,17 +1766,14 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
mov(dst, esi);
}
- // We should not have found a with or catch context by walking the context
- // chain (i.e., the static scope chain and runtime context chain do not
- // agree). A variable occurring in such a scope should have slot type
- // LOOKUP and not CONTEXT.
+ // We should not have found a with context by walking the context chain
+ // (i.e., the static scope chain and runtime context chain do not agree).
+ // A variable occurring in such a scope should have slot type LOOKUP and
+ // not CONTEXT.
if (emit_debug_code()) {
cmp(FieldOperand(dst, HeapObject::kMapOffset),
isolate()->factory()->with_context_map());
Check(not_equal, "Variable resolved to with context.");
- cmp(FieldOperand(dst, HeapObject::kMapOffset),
- isolate()->factory()->with_context_map());
- Check(not_equal, "Variable resolved to catch context.");
}
}
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 542466d1e..eb0f12a39 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -956,7 +956,7 @@ MaybeObject* LoadIC::Load(State state,
// If we did not find a property, check if we need to throw an exception.
if (!lookup.IsProperty()) {
- if (FLAG_strict || IsContextual(object)) {
+ if (IsContextual(object)) {
return ReferenceError("not_defined", name);
}
LOG(isolate(), SuspectReadEvent(*name, *object));
@@ -1097,16 +1097,6 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
}
-String* KeyedLoadIC::GetStubNameForCache(IC::State ic_state) {
- if (ic_state == MONOMORPHIC) {
- return isolate()->heap()->KeyedLoadElementMonomorphic_symbol();
- } else {
- ASSERT(ic_state == MEGAMORPHIC);
- return isolate()->heap()->KeyedLoadElementPolymorphic_symbol();
- }
-}
-
-
MaybeObject* KeyedLoadIC::GetFastElementStubWithoutMapCheck(
bool is_js_array) {
return KeyedLoadFastElementStub().TryGetCode();
@@ -1230,10 +1220,8 @@ MaybeObject* KeyedLoadIC::Load(State state,
LookupForRead(*object, *name, &lookup);
// If we did not find a property, check if we need to throw an exception.
- if (!lookup.IsProperty()) {
- if (FLAG_strict || IsContextual(object)) {
- return ReferenceError("not_defined", name);
- }
+ if (!lookup.IsProperty() && IsContextual(object)) {
+ return ReferenceError("not_defined", name);
}
if (FLAG_use_ic) {
@@ -1636,18 +1624,14 @@ MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
StrictModeFlag strict_mode,
Code* generic_stub) {
State ic_state = target()->ic_state();
- Code* monomorphic_stub;
- // Always compute the MONOMORPHIC stub, even if the MEGAMORPHIC stub ends up
- // being used. This is necessary because the megamorphic stub needs to have
- // access to more information than what is stored in the receiver map in some
- // cases (external arrays need the array type from the MONOMORPHIC stub).
- MaybeObject* maybe_stub = ComputeMonomorphicStub(receiver,
- is_store,
- strict_mode,
- generic_stub);
- if (!maybe_stub->To(&monomorphic_stub)) return maybe_stub;
-
if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+ Code* monomorphic_stub;
+ MaybeObject* maybe_stub = ComputeMonomorphicStub(receiver,
+ is_store,
+ strict_mode,
+ generic_stub);
+ if (!maybe_stub->To(&monomorphic_stub)) return maybe_stub;
+
return monomorphic_stub;
}
ASSERT(target() != generic_stub);
@@ -1698,9 +1682,9 @@ MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
}
// Build the MEGAMORPHIC stub.
Code* stub;
- maybe_stub = ConstructMegamorphicStub(&target_receiver_maps,
- &handler_ics,
- strict_mode);
+ MaybeObject* maybe_stub = ConstructMegamorphicStub(&target_receiver_maps,
+ &handler_ics,
+ strict_mode);
if (!maybe_stub->To(&stub)) return maybe_stub;
MaybeObject* maybe_update = cache->Update(&target_receiver_maps, flags, stub);
if (maybe_update->IsFailure()) return maybe_update;
@@ -1716,22 +1700,7 @@ MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
ASSERT(string_stub() != NULL);
return string_stub();
} else if (receiver_map->has_external_array_elements()) {
- // Determine the array type from the default MONOMORPHIC already generated
- // stub. There is no other way to determine the type of the external array
- // directly from the receiver type.
- Code::Kind kind = this->kind();
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
- NORMAL,
- strict_mode);
- String* monomorphic_name = GetStubNameForCache(MONOMORPHIC);
- Object* maybe_default_stub = receiver_map->FindInCodeCache(monomorphic_name,
- flags);
- if (maybe_default_stub->IsUndefined()) {
- return generic_stub;
- }
- Code* default_stub = Code::cast(maybe_default_stub);
- Map* first_map = default_stub->FindFirstMap();
- return GetExternalArrayStubWithoutMapCheck(first_map->elements_kind());
+ return GetExternalArrayStubWithoutMapCheck(receiver_map->elements_kind());
} else if (receiver_map->has_fast_elements()) {
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
return GetFastElementStubWithoutMapCheck(is_js_array);
@@ -1747,7 +1716,8 @@ MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver,
Code* generic_stub) {
Code* result = NULL;
if (receiver->HasFastElements() ||
- receiver->HasExternalArrayElements()) {
+ receiver->HasExternalArrayElements() ||
+ receiver->HasDictionaryElements()) {
MaybeObject* maybe_stub =
isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
receiver, is_store, strict_mode);
@@ -1759,16 +1729,6 @@ MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver,
}
-String* KeyedStoreIC::GetStubNameForCache(IC::State ic_state) {
- if (ic_state == MONOMORPHIC) {
- return isolate()->heap()->KeyedStoreElementMonomorphic_symbol();
- } else {
- ASSERT(ic_state == MEGAMORPHIC);
- return isolate()->heap()->KeyedStoreElementPolymorphic_symbol();
- }
-}
-
-
MaybeObject* KeyedStoreIC::GetFastElementStubWithoutMapCheck(
bool is_js_array) {
return KeyedStoreFastElementStub(is_js_array).TryGetCode();
@@ -1856,6 +1816,7 @@ MaybeObject* KeyedStoreIC::Store(State state,
stub = non_strict_arguments_stub();
} else if (!force_generic) {
if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
+ HandleScope scope(isolate());
MaybeObject* maybe_stub = ComputeStub(receiver,
true,
strict_mode,
@@ -2333,15 +2294,15 @@ RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
HandleScope scope(isolate);
Handle<Object> operand = args.at<Object>(0);
- int key = args.smi_at(1);
- Token::Value op = static_cast<Token::Value>(args.smi_at(2));
+ Token::Value op = static_cast<Token::Value>(args.smi_at(1));
+ UnaryOverwriteMode mode = static_cast<UnaryOverwriteMode>(args.smi_at(2));
UnaryOpIC::TypeInfo previous_type =
static_cast<UnaryOpIC::TypeInfo>(args.smi_at(3));
UnaryOpIC::TypeInfo type = UnaryOpIC::GetTypeInfo(operand);
type = UnaryOpIC::ComputeNewType(type, previous_type);
- UnaryOpStub stub(key, type);
+ UnaryOpStub stub(op, mode, type);
Handle<Code> code = stub.GetCode();
if (!code.is_null()) {
if (FLAG_trace_ic) {
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index 4b301c5ba..9a663ba6a 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -358,8 +358,6 @@ class KeyedIC: public IC {
virtual Code::Kind kind() const = 0;
- virtual String* GetStubNameForCache(IC::State ic_state) = 0;
-
MaybeObject* ComputeStub(JSObject* receiver,
bool is_store,
StrictModeFlag strict_mode,
@@ -426,8 +424,6 @@ class KeyedLoadIC: public KeyedIC {
protected:
virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
- virtual String* GetStubNameForCache(IC::State ic_state);
-
virtual MaybeObject* ConstructMegamorphicStub(
MapList* receiver_maps,
CodeList* targets,
@@ -581,8 +577,6 @@ class KeyedStoreIC: public KeyedIC {
protected:
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
- virtual String* GetStubNameForCache(IC::State ic_state);
-
virtual MaybeObject* ConstructMegamorphicStub(
MapList* receiver_maps,
CodeList* targets,
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index e473b020f..01f457b93 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -1855,11 +1855,6 @@ void Isolate::Exit() {
}
-void Isolate::ResetEagerOptimizingData() {
- compilation_cache_->ResetEagerOptimizingData();
-}
-
-
#ifdef DEBUG
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index c623d433b..be8141a37 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -332,6 +332,8 @@ class HashMap;
V(int, bad_char_shift_table, kUC16AlphabetSize) \
V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \
V(int, suffix_table, (kBMMaxShift + 1)) \
+ V(uint32_t, random_seed, 2) \
+ V(uint32_t, private_random_seed, 2) \
ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;
@@ -978,8 +980,6 @@ class Isolate {
}
#endif
- void ResetEagerOptimizingData();
-
void SetData(void* data) { embedder_data_ = data; }
void* GetData() { return embedder_data_; }
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 7c85c8a26..004e21a65 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -1362,18 +1362,14 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
}
-int Logger::GetActiveProfilerModules() {
- int result = PROFILER_MODULE_NONE;
- if (profiler_ != NULL && !profiler_->paused()) {
- result |= PROFILER_MODULE_CPU;
- }
- return result;
+bool Logger::IsProfilerPaused() {
+ return profiler_ == NULL || profiler_->paused();
}
-void Logger::PauseProfiler(int flags, int tag) {
+void Logger::PauseProfiler() {
if (!log_->IsEnabled()) return;
- if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
+ if (profiler_ != NULL) {
// It is OK to have negative nesting.
if (--cpu_profiler_nesting_ == 0) {
profiler_->pause();
@@ -1388,18 +1384,12 @@ void Logger::PauseProfiler(int flags, int tag) {
--logging_nesting_;
}
}
- if (tag != 0) {
- UncheckedIntEvent("close-tag", tag);
- }
}
-void Logger::ResumeProfiler(int flags, int tag) {
+void Logger::ResumeProfiler() {
if (!log_->IsEnabled()) return;
- if (tag != 0) {
- UncheckedIntEvent("open-tag", tag);
- }
- if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
+ if (profiler_ != NULL) {
if (cpu_profiler_nesting_++ == 0) {
++logging_nesting_;
if (FLAG_prof_lazy) {
@@ -1421,7 +1411,7 @@ void Logger::ResumeProfiler(int flags, int tag) {
// This function can be called when Log's mutex is acquired,
// either from main or Profiler's thread.
void Logger::LogFailure() {
- PauseProfiler(PROFILER_MODULE_CPU, 0);
+ PauseProfiler();
}
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 8a627eb3f..6ffd18c61 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -280,9 +280,9 @@ class Logger {
// Pause/Resume collection of profiling data.
// When data collection is paused, CPU Tick events are discarded until
// data collection is Resumed.
- void PauseProfiler(int flags, int tag);
- void ResumeProfiler(int flags, int tag);
- int GetActiveProfilerModules();
+ void PauseProfiler();
+ void ResumeProfiler();
+ bool IsProfilerPaused();
// If logging is performed into a memory buffer, allows to
// retrieve previously written messages. See v8.h.
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index fc1ab9270..efc488668 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -1424,6 +1424,12 @@ void MarkCompactCollector::MarkLiveObjects() {
// reachable from the weak roots.
ProcessExternalMarking();
+ // Object literal map caches reference symbols (cache keys) and maps
+ // (cache values). At this point still useful maps have already been
+ // marked. Mark the keys for the alive values before we process the
+ // symbol table.
+ ProcessMapCaches();
+
// Prune the symbol table removing all symbols only pointed to by the
// symbol table. Cannot use symbol_table() here because the symbol
// table is marked.
@@ -1452,6 +1458,57 @@ void MarkCompactCollector::MarkLiveObjects() {
}
+void MarkCompactCollector::ProcessMapCaches() {
+ Object* raw_context = heap()->global_contexts_list_;
+ while (raw_context != heap()->undefined_value()) {
+ Context* context = reinterpret_cast<Context*>(raw_context);
+ if (context->IsMarked()) {
+ HeapObject* raw_map_cache =
+ HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
+ // A map cache may be reachable from the stack. In this case
+ // it's already transitively marked and it's too late to clean
+ // up its parts.
+ if (!raw_map_cache->IsMarked() &&
+ raw_map_cache != heap()->undefined_value()) {
+ MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
+ int existing_elements = map_cache->NumberOfElements();
+ int used_elements = 0;
+ for (int i = MapCache::kElementsStartIndex;
+ i < map_cache->length();
+ i += MapCache::kEntrySize) {
+ Object* raw_key = map_cache->get(i);
+ if (raw_key == heap()->undefined_value() ||
+ raw_key == heap()->null_value()) continue;
+ STATIC_ASSERT(MapCache::kEntrySize == 2);
+ Object* raw_map = map_cache->get(i + 1);
+ if (raw_map->IsHeapObject() &&
+ HeapObject::cast(raw_map)->IsMarked()) {
+ ++used_elements;
+ } else {
+ // Delete useless entries with unmarked maps.
+ ASSERT(raw_map->IsMap());
+ map_cache->set_null_unchecked(heap(), i);
+ map_cache->set_null_unchecked(heap(), i + 1);
+ }
+ }
+ if (used_elements == 0) {
+ context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
+ } else {
+ // Note: we don't actually shrink the cache here to avoid
+ // extra complexity during GC. We rely on subsequent cache
+ // usages (EnsureCapacity) to do this.
+ map_cache->ElementsRemoved(existing_elements - used_elements);
+ MarkObject(map_cache);
+ }
+ }
+ }
+ // Move to next element in the list.
+ raw_context = context->get(Context::NEXT_CONTEXT_LINK);
+ }
+ ProcessMarkingStack();
+}
+
+
#ifdef DEBUG
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
live_bytes_ += obj->Size();
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index 04d0ff69a..179edba74 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -306,6 +306,10 @@ class MarkCompactCollector {
// flag on the marking stack.
void RefillMarkingStack();
+ // After reachable maps have been marked process per context object
+ // literal map caches removing unmarked entries.
+ void ProcessMapCaches();
+
// Callback function for telling whether the object *p is an unmarked
// heap object.
static bool IsUnmarkedHeapObject(Object** p);
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 1aa1838be..d7fac867f 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -166,7 +166,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the fixed slots.
__ li(a1, Operand(Smi::FromInt(0)));
__ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ sw(v0, MemOperand(v0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
__ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
@@ -1847,19 +1846,13 @@ void UnaryOpStub::Generate(MacroAssembler* masm) {
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
// Argument is in a0 and v0 at this point, so we can overwrite a0.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ li(a2, Operand(Smi::FromInt(MinorKey())));
- __ li(a1, Operand(Smi::FromInt(op_)));
+ __ li(a2, Operand(Smi::FromInt(op_)));
+ __ li(a1, Operand(Smi::FromInt(mode_)));
__ li(a0, Operand(Smi::FromInt(operand_type_)));
-
__ Push(v0, a2, a1, a0);
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
- masm->isolate()),
- 4,
- 1);
+ ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index e2323c174..6c70bdd70 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -61,18 +61,11 @@ class TranscendentalCacheStub: public CodeStub {
class UnaryOpStub: public CodeStub {
public:
- UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
+ UnaryOpStub(Token::Value op,
+ UnaryOverwriteMode mode,
+ UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
- operand_type_(UnaryOpIC::UNINITIALIZED),
- name_(NULL) {
- }
-
- UnaryOpStub(
- int key,
- UnaryOpIC::TypeInfo operand_type)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
operand_type_(operand_type),
name_(NULL) {
}
@@ -90,8 +83,7 @@ class UnaryOpStub: public CodeStub {
#ifdef DEBUG
void Print() {
- PrintF("UnaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
+ PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 4b69859a4..9a19aba75 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -78,6 +78,11 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
}
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ UNIMPLEMENTED();
+}
+
+
void Deoptimizer::EntryGenerator::Generate() {
UNIMPLEMENTED();
}
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index ea0b09225..5b9bbb578 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -101,16 +101,18 @@ class JumpPatchSite BASE_EMBEDDED {
}
void EmitPatchInfo() {
- int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
- Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
- __ andi(at, reg, delta_to_patch_site % kImm16Mask);
+ if (patch_site_.is_bound()) {
+ int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+ Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
+ __ andi(at, reg, delta_to_patch_site % kImm16Mask);
#ifdef DEBUG
- info_emitted_ = true;
+ info_emitted_ = true;
#endif
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
}
- bool is_bound() const { return patch_site_.is_bound(); }
-
private:
MacroAssembler* masm_;
Label patch_site_;
@@ -137,6 +139,7 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
+ scope_ = info->scope();
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -154,13 +157,13 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
if (info->is_strict_mode() || info->is_native()) {
Label ok;
__ Branch(&ok, eq, t1, Operand(zero_reg));
- int receiver_offset = scope()->num_parameters() * kPointerSize;
+ int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ sw(a2, MemOperand(sp, receiver_offset));
__ bind(&ok);
}
- int locals_count = scope()->num_stack_slots();
+ int locals_count = info->scope()->num_stack_slots();
__ Push(ra, fp, cp, a1);
if (locals_count > 0) {
@@ -180,7 +183,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is in a1.
@@ -196,7 +199,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// passed to us. It's saved in the stack and kept live in cp.
__ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
+ int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
@@ -228,10 +231,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ mov(a3, a1);
}
// Receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
__ Addu(a2, fp,
Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ li(a1, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ li(a1, Operand(Smi::FromInt(num_parameters)));
__ Push(a3, a2, a1);
// Arguments to ArgumentsAccessStub:
@@ -348,7 +352,7 @@ void FullCodeGenerator::EmitReturnSequence() {
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
- int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
+ int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
__ RecordJSReturn();
masm_->mov(sp, fp);
@@ -716,10 +720,14 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
- // Check that we're not inside a 'with'.
- __ lw(a1, ContextOperand(cp, Context::FCONTEXT_INDEX));
- __ Check(eq, "Unexpected declaration in current context.",
- a1, Operand(cp));
+ // Check that we're not inside a with or catch context.
+ __ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ LoadRoot(t0, Heap::kWithContextMapRootIndex);
+ __ Check(ne, "Declaration in with context.",
+ a1, Operand(t0));
+ __ LoadRoot(t0, Heap::kCatchContextMapRootIndex);
+ __ Check(ne, "Declaration in catch context.",
+ a1, Operand(t0));
}
if (mode == Variable::CONST) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -790,7 +798,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ CallWithAstId(ic);
// Value in v0 is ignored (declarations are statements).
}
}
@@ -865,7 +873,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- EmitCallIC(ic, &patch_site, clause->CompareId());
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ patch_site.EmitPatchInfo();
__ Branch(&next_test, ne, v0, Operand(zero_reg));
__ Drop(1); // Switch value is no longer needed.
@@ -1164,7 +1173,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, mode, AstNode::kNoNumber);
+ __ CallWithAstId(ic, mode);
}
@@ -1244,7 +1253,7 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
__ li(a0, Operand(key_literal->handle()));
Handle<Code> ic =
isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ Branch(done);
}
}
@@ -1266,7 +1275,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) {
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(v0);
} else if (slot->type() == Slot::LOOKUP) {
@@ -1412,7 +1421,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1656,7 +1665,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ li(a2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name a0 and a2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1665,7 +1674,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
// Call keyed load IC. It has arguments key and receiver in a0 and a1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1693,7 +1702,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+ __ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
__ jmp(&done);
__ bind(&smi_case);
@@ -1774,7 +1784,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ mov(a0, result_register());
__ pop(a1);
BinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), NULL, expr->id());
+ JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
+ __ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
context()->Plug(v0);
}
@@ -1814,7 +1826,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ CallWithAstId(ic);
break;
}
case KEYED_PROPERTY: {
@@ -1827,7 +1839,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ CallWithAstId(ic);
break;
}
}
@@ -1852,7 +1864,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@@ -1873,17 +1885,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ Branch(&skip, ne, a1, Operand(t0));
__ sw(result_register(), MemOperand(fp, SlotOffset(slot)));
break;
- case Slot::CONTEXT: {
- __ lw(a1, ContextOperand(cp, Context::FCONTEXT_INDEX));
- __ lw(a2, ContextOperand(a1, slot->index()));
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- __ Branch(&skip, ne, a2, Operand(t0));
- __ sw(result_register(), ContextOperand(a1, slot->index()));
- int offset = Context::SlotOffset(slot->index());
- __ mov(a3, result_register()); // Preserve the stored value in v0.
- __ RecordWrite(a1, Operand(offset), a3, a2);
- break;
- }
+ case Slot::CONTEXT:
case Slot::LOOKUP:
__ push(result_register());
__ li(a0, Operand(slot->var()->name()));
@@ -1960,7 +1962,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2012,7 +2014,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2065,7 +2067,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
- EmitCallIC(ic, mode, expr->id());
+ __ CallWithAstId(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2099,7 +2101,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
__ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2139,7 +2141,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
__ push(a1);
// Push the receiver of the enclosing function and do runtime call.
- __ lw(a1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
+ int receiver_offset = 2 + info_->scope()->num_parameters();
+ __ lw(a1, MemOperand(fp, receiver_offset * kPointerSize));
__ push(a1);
// Push the strict mode flag.
__ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
@@ -2280,7 +2283,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
- // for a regular property use keyed EmitCallIC.
+ // for a regular property use EmitKeyedCallWithIC.
if (prop->is_synthetic()) {
// Do not visit the object and key subexpressions (they are shared
// by all occurrences of the same rewritten parameter).
@@ -2298,7 +2301,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ lw(a1, GlobalObjectOperand());
__ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
__ Push(v0, a1); // Function, receiver.
@@ -2685,7 +2688,7 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
// parameter count in a0.
VisitForAccumulatorValue(args->at(0));
__ mov(a1, v0);
- __ li(a0, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ li(a0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(v0);
@@ -2697,7 +2700,7 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
Label exit;
// Get the number of formal parameters.
- __ li(v0, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -3596,6 +3599,39 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // Load the function into v0.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Prepare for the test.
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Test for strict mode function.
+ __ lw(a1, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a1, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(at, a1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ Branch(if_true, ne, at, Operand(zero_reg));
+
+ // Test for native function.
+ __ And(at, a1, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ Branch(if_true, ne, at, Operand(zero_reg));
+
+ // Not native or strict-mode function.
+ __ Branch(if_false);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -3628,7 +3664,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
isolate()->stub_cache()->ComputeCallInitialize(arg_count,
NOT_IN_LOOP,
mode);
- EmitCallIC(ic, mode, expr->id());
+ __ CallWithAstId(ic, mode, expr->id());
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@@ -3771,7 +3807,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
__ mov(a0, result_register());
- EmitCallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ __ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(v0);
}
@@ -3882,7 +3918,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
+ __ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ patch_site.EmitPatchInfo();
__ bind(&done);
// Store the value returned in v0.
@@ -3914,7 +3951,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3932,7 +3969,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3956,7 +3993,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ CallWithAstId(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(v0);
} else if (proxy != NULL &&
@@ -4153,7 +4190,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- EmitCallIC(ic, &patch_site, expr->id());
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
}
@@ -4212,70 +4250,6 @@ Register FullCodeGenerator::context_register() {
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- RelocInfo::Mode mode,
- unsigned ast_id) {
- ASSERT(mode == RelocInfo::CODE_TARGET ||
- mode == RelocInfo::CODE_TARGET_CONTEXT);
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1, a1, a2);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1, a1, a2);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1, a1, a2);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1, a1, a2);
- default:
- break;
- }
- if (ast_id == kNoASTId || mode == RelocInfo::CODE_TARGET_CONTEXT) {
- __ Call(ic, mode);
- } else {
- ASSERT(mode == RelocInfo::CODE_TARGET);
- mode = RelocInfo::CODE_TARGET_WITH_ID;
- __ CallWithAstId(ic, mode, ast_id);
- }
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- JumpPatchSite* patch_site,
- unsigned ast_id) {
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1, a1, a2);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1, a1, a2);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1, a1, a2);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1, a1, a2);
- default:
- break;
- }
-
- if (ast_id == kNoASTId) {
- __ Call(ic, RelocInfo::CODE_TARGET);
- } else {
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET_WITH_ID, ast_id);
- }
- if (patch_site != NULL && patch_site->is_bound()) {
- patch_site->EmitPatchInfo();
- } else {
- __ nop(); // Signals no inlined code.
- }
-}
-
-
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ sw(value, MemOperand(fp, frame_offset));
@@ -4287,6 +4261,27 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
}
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope()) {
+ // Contexts nested in the global context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ __ li(at, Operand(Smi::FromInt(0)));
+ } else if (declaration_scope->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ lw(at, ContextOperand(cp, Context::CLOSURE_INDEX));
+ } else {
+ ASSERT(declaration_scope->is_function_scope());
+ __ lw(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ __ push(at);
+}
+
+
// ----------------------------------------------------------------------------
// Non-local control flow support.
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 990b4995f..7c085baac 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -2088,10 +2088,12 @@ void MacroAssembler::CallWithAstId(Handle<Code> code,
Condition cond,
Register r1,
const Operand& r2) {
- ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
- ASSERT(ast_id != kNoASTId);
- ASSERT(ast_id_for_reloc_info_ == kNoASTId);
- ast_id_for_reloc_info_ = ast_id;
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+ ASSERT(ast_id_for_reloc_info_ == kNoASTId);
+ ast_id_for_reloc_info_ = ast_id;
+ rmode = RelocInfo::CODE_TARGET_WITH_ID;
+ }
Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
}
@@ -3715,17 +3717,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
// cannot be allowed to destroy the context in esi).
Move(dst, cp);
}
-
- // We should not have found a 'with' context by walking the context chain
- // (i.e., the static scope chain and runtime context chain do not agree).
- // A variable occurring in such a scope should have slot type LOOKUP and
- // not CONTEXT.
- if (emit_debug_code()) {
- lw(t9, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- Check(eq, "Yo dawg, I heard you liked function contexts "
- "so I put function contexts in all your contexts",
- dst, Operand(t9));
- }
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 83bd73e09..985ef0c83 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -181,8 +181,8 @@ DECLARE_NOTARGET_PROTOTYPE(Ret)
#undef DECLARE_BRANCH_PROTOTYPES
void CallWithAstId(Handle<Code> code,
- RelocInfo::Mode rmode,
- unsigned ast_id,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ unsigned ast_id = kNoASTId,
Condition cond = al,
Register r1 = zero_reg,
const Operand& r2 = Operand(zero_reg));
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index 3a0353515..57de6c68f 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -1243,13 +1243,17 @@ const kFrameDetailsLocalCountIndex = 4;
const kFrameDetailsSourcePositionIndex = 5;
const kFrameDetailsConstructCallIndex = 6;
const kFrameDetailsAtReturnIndex = 7;
-const kFrameDetailsDebuggerFrameIndex = 8;
+const kFrameDetailsFlagsIndex = 8;
const kFrameDetailsFirstDynamicIndex = 9;
const kFrameDetailsNameIndex = 0;
const kFrameDetailsValueIndex = 1;
const kFrameDetailsNameValueSize = 2;
+const kFrameDetailsFlagDebuggerFrame = 1;
+const kFrameDetailsFlagOptimizedFrame = 2;
+const kFrameDetailsFlagInlinedFrame = 4;
+
/**
* Wrapper for the frame details information retreived from the VM. The frame
* details from the VM is an array with the following content. See runtime.cc
@@ -1262,7 +1266,7 @@ const kFrameDetailsNameValueSize = 2;
* 5: Source position
* 6: Construct call
* 7: Is at return
- * 8: Debugger frame
+ * 8: Flags (debugger frame, optimized frame, inlined frame)
* Arguments name, value
* Locals name, value
* Return value if any
@@ -1308,7 +1312,22 @@ FrameDetails.prototype.isAtReturn = function() {
FrameDetails.prototype.isDebuggerFrame = function() {
%CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsDebuggerFrameIndex];
+ var f = kFrameDetailsFlagDebuggerFrame;
+ return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
+}
+
+
+FrameDetails.prototype.isOptimizedFrame = function() {
+ %CheckExecutionState(this.break_id_);
+ var f = kFrameDetailsFlagOptimizedFrame;
+ return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
+}
+
+
+FrameDetails.prototype.isInlinedFrame = function() {
+ %CheckExecutionState(this.break_id_);
+ var f = kFrameDetailsFlagInlinedFrame;
+ return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
}
@@ -1447,6 +1466,16 @@ FrameMirror.prototype.isDebuggerFrame = function() {
};
+FrameMirror.prototype.isOptimizedFrame = function() {
+ return this.details_.isOptimizedFrame();
+};
+
+
+FrameMirror.prototype.isInlinedFrame = function() {
+ return this.details_.isInlinedFrame();
+};
+
+
FrameMirror.prototype.argumentCount = function() {
return this.details_.argumentCount();
};
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 92a2ed494..6242198ec 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -3031,11 +3031,33 @@ MaybeObject* JSObject::DeleteFastElement(uint32_t index) {
if (!maybe->ToObject(&writable)) return maybe;
backing_store = FixedArray::cast(writable);
}
- int length = IsJSArray()
+ uint32_t length = static_cast<uint32_t>(
+ IsJSArray()
? Smi::cast(JSArray::cast(this)->length())->value()
- : backing_store->length();
- if (index < static_cast<uint32_t>(length)) {
+ : backing_store->length());
+ if (index < length) {
backing_store->set_the_hole(index);
+ // If an old space backing store is larger than a certain size and
+ // has too few used values, normalize it.
+ // To avoid doing the check on every delete we require at least
+ // one adjacent hole to the value being deleted.
+ Object* hole = heap->the_hole_value();
+ const int kMinLengthForSparsenessCheck = 64;
+ if (backing_store->length() >= kMinLengthForSparsenessCheck &&
+ !heap->InNewSpace(backing_store) &&
+ ((index > 0 && backing_store->get(index - 1) == hole) ||
+ (index + 1 < length && backing_store->get(index + 1) == hole))) {
+ int num_used = 0;
+ for (int i = 0; i < backing_store->length(); ++i) {
+ if (backing_store->get(i) != hole) ++num_used;
+ // Bail out early if more than 1/4 is used.
+ if (4 * num_used > backing_store->length()) break;
+ }
+ if (4 * num_used <= backing_store->length()) {
+ MaybeObject* result = NormalizeElements();
+ if (result->IsFailure()) return result;
+ }
+ }
}
return heap->true_value();
}
@@ -6287,19 +6309,6 @@ void JSFunction::MarkForLazyRecompilation() {
}
-uint32_t JSFunction::SourceHash() {
- uint32_t hash = 0;
- Object* script = shared()->script();
- if (!script->IsUndefined()) {
- Object* source = Script::cast(script)->source();
- if (source->IsUndefined()) hash = String::cast(source)->Hash();
- }
- hash ^= ComputeIntegerHash(shared()->start_position_and_type());
- hash += ComputeIntegerHash(shared()->end_position());
- return hash;
-}
-
-
bool JSFunction::IsInlineable() {
if (IsBuiltin()) return false;
SharedFunctionInfo* shared_info = shared();
@@ -6950,7 +6959,7 @@ Map* Code::FindFirstMap() {
}
-#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
+#ifdef ENABLE_DISASSEMBLER
void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
disasm::NameConverter converter;
@@ -7098,10 +7107,6 @@ void DeoptimizationOutputData::DeoptimizationOutputDataPrint(FILE* out) {
}
}
-#endif // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
-
-
-#ifdef ENABLE_DISASSEMBLER
// Identify kind of code.
const char* Code::Kind2String(Kind kind) {
@@ -7192,6 +7197,9 @@ void Code::Disassemble(const char* name, FILE* out) {
if (ic_state() == MONOMORPHIC) {
PrintF(out, "type = %s\n", PropertyType2String(type()));
}
+ if (is_call_stub() || is_keyed_call_stub()) {
+ PrintF(out, "argc = %d\n", arguments_count());
+ }
}
if ((name != NULL) && (name[0] != '\0')) {
PrintF(out, "name = %s\n", name);
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 9deee23fc..b2b1a77c6 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -3418,7 +3418,7 @@ class DeoptimizationInputData: public FixedArray {
// Casting.
static inline DeoptimizationInputData* cast(Object* obj);
-#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
+#ifdef ENABLE_DISASSEMBLER
void DeoptimizationInputDataPrint(FILE* out);
#endif
@@ -3946,6 +3946,10 @@ class Map: public HeapObject {
kind <= JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
}
+ inline bool has_dictionary_elements() {
+ return elements_kind() == JSObject::DICTIONARY_ELEMENTS;
+ }
+
// Tells whether the map is attached to SharedFunctionInfo
// (for inobject slack tracking).
inline void set_attached_to_shared_function_info(bool value);
@@ -4924,9 +4928,6 @@ class JSFunction: public JSObject {
// recompilation.
inline bool IsMarkedForLazyRecompilation();
- // Compute a hash code for the source code of this function.
- uint32_t SourceHash();
-
// Check whether or not this function is inlineable.
bool IsInlineable();
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 4e5ba1389..184f0a2a2 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -411,6 +411,7 @@ Scope* Parser::NewScope(Scope* parent, Scope::Type type, bool inside_with) {
return result;
}
+
// ----------------------------------------------------------------------------
// Target is a support class to facilitate manipulation of the
// Parser's target_stack_ (the stack of potential 'break' and
@@ -1301,13 +1302,14 @@ VariableProxy* Parser::Declare(Handle<String> name,
// to the calling function context.
// Similarly, strict mode eval scope does not leak variable declarations to
// the caller's scope so we declare all locals, too.
- if (top_scope_->is_function_scope() ||
- top_scope_->is_strict_mode_eval_scope()) {
+ Scope* declaration_scope = top_scope_->DeclarationScope();
+ if (declaration_scope->is_function_scope() ||
+ declaration_scope->is_strict_mode_eval_scope()) {
// Declare the variable in the function scope.
- var = top_scope_->LocalLookup(name);
+ var = declaration_scope->LocalLookup(name);
if (var == NULL) {
// Declare the name.
- var = top_scope_->DeclareLocal(name, mode);
+ var = declaration_scope->DeclareLocal(name, mode);
} else {
// The name was declared before; check for conflicting
// re-declarations. If the previous declaration was a const or the
@@ -1323,7 +1325,7 @@ VariableProxy* Parser::Declare(Handle<String> name,
Expression* expression =
NewThrowTypeError(isolate()->factory()->redeclaration_symbol(),
type_string, name);
- top_scope_->SetIllegalRedeclaration(expression);
+ declaration_scope->SetIllegalRedeclaration(expression);
}
}
}
@@ -1344,14 +1346,18 @@ VariableProxy* Parser::Declare(Handle<String> name,
// semantic issue as long as we keep the source order, but it may be
// a performance issue since it may lead to repeated
// Runtime::DeclareContextSlot() calls.
- VariableProxy* proxy = top_scope_->NewUnresolved(name, inside_with());
- top_scope_->AddDeclaration(new(zone()) Declaration(proxy, mode, fun));
+ VariableProxy* proxy = declaration_scope->NewUnresolved(name, false);
+ declaration_scope->AddDeclaration(new(zone()) Declaration(proxy, mode, fun));
// For global const variables we bind the proxy to a variable.
- if (mode == Variable::CONST && top_scope_->is_global_scope()) {
+ if (mode == Variable::CONST && declaration_scope->is_global_scope()) {
ASSERT(resolve); // should be set by all callers
Variable::Kind kind = Variable::NORMAL;
- var = new(zone()) Variable(top_scope_, name, Variable::CONST, true, kind);
+ var = new(zone()) Variable(declaration_scope,
+ name,
+ Variable::CONST,
+ true,
+ kind);
}
// If requested and we have a local variable, bind the proxy to the variable
@@ -1407,7 +1413,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// isn't lazily compiled. The extension structures are only
// accessible while parsing the first time not when reparsing
// because of lazy compilation.
- top_scope_->ForceEagerCompilation();
+ top_scope_->DeclarationScope()->ForceEagerCompilation();
// Compute the function template for the native function.
v8::Handle<v8::FunctionTemplate> fun_template =
@@ -1485,8 +1491,8 @@ Block* Parser::ParseVariableStatement(bool* ok) {
// VariableStatement ::
// VariableDeclarations ';'
- Expression* dummy; // to satisfy the ParseVariableDeclarations() signature
- Block* result = ParseVariableDeclarations(true, &dummy, CHECK_OK);
+ Handle<String> ignore;
+ Block* result = ParseVariableDeclarations(true, &ignore, CHECK_OK);
ExpectSemicolon(CHECK_OK);
return result;
}
@@ -1504,18 +1510,19 @@ bool Parser::IsEvalOrArguments(Handle<String> string) {
// to initialize it properly. This mechanism is used for the parsing
// of 'for-in' loops.
Block* Parser::ParseVariableDeclarations(bool accept_IN,
- Expression** var,
+ Handle<String>* out,
bool* ok) {
// VariableDeclarations ::
// ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
Variable::Mode mode = Variable::VAR;
bool is_const = false;
+ Scope* declaration_scope = top_scope_->DeclarationScope();
if (peek() == Token::VAR) {
Consume(Token::VAR);
} else if (peek() == Token::CONST) {
Consume(Token::CONST);
- if (top_scope_->is_strict_mode()) {
+ if (declaration_scope->is_strict_mode()) {
ReportMessage("strict_const", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -1540,18 +1547,18 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
//
// Create new block with one expected declaration.
Block* block = new(zone()) Block(NULL, 1, true);
- VariableProxy* last_var = NULL; // the last variable declared
int nvars = 0; // the number of variables declared
+ Handle<String> name;
do {
if (fni_ != NULL) fni_->Enter();
// Parse variable name.
if (nvars > 0) Consume(Token::COMMA);
- Handle<String> name = ParseIdentifier(CHECK_OK);
+ name = ParseIdentifier(CHECK_OK);
if (fni_ != NULL) fni_->PushVariableName(name);
// Strict mode variables may not be named eval or arguments
- if (top_scope_->is_strict_mode() && IsEvalOrArguments(name)) {
+ if (declaration_scope->is_strict_mode() && IsEvalOrArguments(name)) {
ReportMessage("strict_var_name", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -1569,11 +1576,10 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
// If we have a const declaration, in an inner scope, the proxy is always
// bound to the declared variable (independent of possibly surrounding with
// statements).
- last_var = Declare(name, mode, NULL,
- is_const /* always bound for CONST! */,
- CHECK_OK);
+ Declare(name, mode, NULL, is_const /* always bound for CONST! */,
+ CHECK_OK);
nvars++;
- if (top_scope_->num_var_or_const() > kMaxNumFunctionLocals) {
+ if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
ReportMessageAt(scanner().location(), "too_many_variables",
Vector<const char*>::empty());
*ok = false;
@@ -1589,10 +1595,10 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
//
// var v; v = x;
//
- // In particular, we need to re-lookup 'v' as it may be a
- // different 'v' than the 'v' in the declaration (if we are inside
- // a 'with' statement that makes a object property with name 'v'
- // visible).
+ // In particular, we need to re-lookup 'v' (in top_scope_, not
+ // declaration_scope) as it may be a different 'v' than the 'v' in the
+ // declaration (e.g., if we are inside a 'with' statement or 'catch'
+ // block).
//
// However, note that const declarations are different! A const
// declaration of the form:
@@ -1607,6 +1613,7 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
// one - there is no re-lookup (see the last parameter of the
// Declare() call above).
+ Scope* initialization_scope = is_const ? declaration_scope : top_scope_;
Expression* value = NULL;
int position = -1;
if (peek() == Token::ASSIGN) {
@@ -1647,7 +1654,7 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
// browsers where the global object (window) has lots of
// properties defined in prototype objects.
- if (top_scope_->is_global_scope()) {
+ if (initialization_scope->is_global_scope()) {
// Compute the arguments for the runtime call.
ZoneList<Expression*>* arguments = new(zone()) ZoneList<Expression*>(3);
// We have at least 1 parameter.
@@ -1670,8 +1677,10 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
} else {
// Add strict mode.
// We may want to pass singleton to avoid Literal allocations.
- arguments->Add(NewNumberLiteral(
- top_scope_->is_strict_mode() ? kStrictMode : kNonStrictMode));
+ StrictModeFlag flag = initialization_scope->is_strict_mode()
+ ? kStrictMode
+ : kNonStrictMode;
+ arguments->Add(NewNumberLiteral(flag));
// Be careful not to assign a value to the global variable if
// we're in a with. The initialization value should not
@@ -1708,8 +1717,11 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
// the top context for variables). Sigh...
if (value != NULL) {
Token::Value op = (is_const ? Token::INIT_CONST : Token::INIT_VAR);
+ bool in_with = is_const ? false : inside_with();
+ VariableProxy* proxy =
+ initialization_scope->NewUnresolved(name, in_with);
Assignment* assignment =
- new(zone()) Assignment(op, last_var, value, position);
+ new(zone()) Assignment(op, proxy, value, position);
if (block) {
block->AddStatement(new(zone()) ExpressionStatement(assignment));
}
@@ -1718,10 +1730,10 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
if (fni_ != NULL) fni_->Leave();
} while (peek() == Token::COMMA);
- if (!is_const && nvars == 1) {
- // We have a single, non-const variable.
- ASSERT(last_var != NULL);
- *var = last_var;
+ // If there was a single non-const declaration, return it in the output
+ // parameter for possible use by for/in.
+ if (nvars == 1 && !is_const) {
+ *out = name;
}
return block;
@@ -1895,7 +1907,9 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// function. See ECMA-262, section 12.9, page 67.
//
// To be consistent with KJS we report the syntax error at runtime.
- if (!top_scope_->is_function_scope()) {
+ Scope* declaration_scope = top_scope_->DeclarationScope();
+ if (declaration_scope->is_global_scope() ||
+ declaration_scope->is_eval_scope()) {
Handle<String> type = isolate()->factory()->illegal_return_symbol();
Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
return new(zone()) ExpressionStatement(throw_error);
@@ -1922,7 +1936,7 @@ Block* Parser::WithHelper(Expression* obj, ZoneStringList* labels, bool* ok) {
Statement* stat;
{ Target target(&this->target_stack_, &collector);
with_nesting_level_++;
- top_scope_->RecordWithStatement();
+ top_scope_->DeclarationScope()->RecordWithStatement();
stat = ParseStatement(labels, CHECK_OK);
with_nesting_level_--;
}
@@ -2082,6 +2096,8 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
// block. Since we don't know yet if there will be a finally block, we
// always collect the targets.
TargetCollector catch_collector;
+ Scope* catch_scope = NULL;
+ Variable* catch_variable = NULL;
Block* catch_block = NULL;
Handle<String> name;
if (tok == Token::CATCH) {
@@ -2108,10 +2124,16 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
TargetCollector inner_collector;
{ Target target(&this->target_stack_, &catch_collector);
{ Target target(&this->target_stack_, &inner_collector);
- ++with_nesting_level_;
- top_scope_->RecordWithStatement();
+ catch_scope = NewScope(top_scope_, Scope::CATCH_SCOPE, inside_with());
+ if (top_scope_->is_strict_mode()) {
+ catch_scope->EnableStrictMode();
+ }
+ catch_variable = catch_scope->DeclareLocal(name, Variable::VAR);
+
+ Scope* saved_scope = top_scope_;
+ top_scope_ = catch_scope;
inner_body = ParseBlock(NULL, CHECK_OK);
- --with_nesting_level_;
+ top_scope_ = saved_scope;
}
}
@@ -2145,19 +2167,28 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
// 'try { try B0 catch B1 } finally B2'
if (catch_block != NULL && finally_block != NULL) {
+ // If we have both, create an inner try/catch.
+ ASSERT(catch_scope != NULL && catch_variable != NULL);
TryCatchStatement* statement =
- new(zone()) TryCatchStatement(try_block, name, catch_block);
+ new(zone()) TryCatchStatement(try_block,
+ catch_scope,
+ catch_variable,
+ catch_block);
statement->set_escaping_targets(try_collector.targets());
try_block = new(zone()) Block(NULL, 1, false);
try_block->AddStatement(statement);
- catch_block = NULL;
+ catch_block = NULL; // Clear to indicate it's been handled.
}
TryStatement* result = NULL;
if (catch_block != NULL) {
ASSERT(finally_block == NULL);
+ ASSERT(catch_scope != NULL && catch_variable != NULL);
result =
- new(zone()) TryCatchStatement(try_block, name, catch_block);
+ new(zone()) TryCatchStatement(try_block,
+ catch_scope,
+ catch_variable,
+ catch_block);
} else {
ASSERT(finally_block != NULL);
result = new(zone()) TryFinallyStatement(try_block, finally_block);
@@ -2230,10 +2261,12 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::LPAREN, CHECK_OK);
if (peek() != Token::SEMICOLON) {
if (peek() == Token::VAR || peek() == Token::CONST) {
- Expression* each = NULL;
+ Handle<String> name;
Block* variable_statement =
- ParseVariableDeclarations(false, &each, CHECK_OK);
- if (peek() == Token::IN && each != NULL) {
+ ParseVariableDeclarations(false, &name, CHECK_OK);
+
+ if (peek() == Token::IN && !name.is_null()) {
+ VariableProxy* each = top_scope_->NewUnresolved(name, inside_with());
ForInStatement* loop = new(zone()) ForInStatement(labels);
Target target(&this->target_stack_, loop);
@@ -2901,8 +2934,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
switch (peek()) {
case Token::THIS: {
Consume(Token::THIS);
- VariableProxy* recv = top_scope_->receiver();
- result = recv;
+ result = new(zone()) VariableProxy(top_scope_->receiver());
break;
}
@@ -3762,7 +3794,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
if (extension_ != NULL) {
// The extension structures are only accessible while parsing the
// very first time not when reparsing because of lazy compilation.
- top_scope_->ForceEagerCompilation();
+ top_scope_->DeclarationScope()->ForceEagerCompilation();
}
const Runtime::Function* function = Runtime::FunctionForSymbol(name);
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index ea2e0d529..9ce1026c9 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -436,7 +436,7 @@ class Parser {
const char* message,
Vector<Handle<String> > args);
- protected:
+ private:
// Limit on number of function parameters is chosen arbitrarily.
// Code::Flags uses only the low 17 bits of num-parameters to
// construct a hashable id, so if more than 2^17 are allowed, this
@@ -484,7 +484,9 @@ class Parser {
Statement* ParseNativeDeclaration(bool* ok);
Block* ParseBlock(ZoneStringList* labels, bool* ok);
Block* ParseVariableStatement(bool* ok);
- Block* ParseVariableDeclarations(bool accept_IN, Expression** var, bool* ok);
+ Block* ParseVariableDeclarations(bool accept_IN,
+ Handle<String>* out,
+ bool* ok);
Statement* ParseExpressionOrLabelledStatement(ZoneStringList* labels,
bool* ok);
IfStatement* ParseIfStatement(ZoneStringList* labels, bool* ok);
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index dd4bd5d7c..bbd982c31 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -88,6 +88,7 @@ double ceiling(double x) {
}
+static Mutex* limit_mutex = NULL;
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -96,6 +97,7 @@ void OS::Setup() {
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
}
@@ -145,6 +147,9 @@ static void* highest_ever_allocated = reinterpret_cast<void*>(0);
static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ ASSERT(limit_mutex != NULL);
+ ScopedLock lock(limit_mutex);
+
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
Max(highest_ever_allocated,
@@ -407,7 +412,6 @@ static void* ThreadEntry(void* arg) {
// one) so we initialize it here too.
thread->data()->thread_ = pthread_self();
ASSERT(thread->data()->thread_ != kNoThread);
- Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
thread->Run();
return NULL;
}
@@ -587,78 +591,172 @@ Semaphore* OS::CreateSemaphore(int count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
-static Sampler* active_sampler_ = NULL;
-static pthread_t vm_tid_ = 0;
-
-
static pthread_t GetThreadID() {
return pthread_self();
}
-
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
- if (active_sampler_ == NULL || !active_sampler_->IsActive()) return;
- if (vm_tid_ != GetThreadID()) return;
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
+ // We require a fully initialized and entered isolate.
+ return;
+ }
+ if (v8::Locker::IsActive() &&
+ !isolate->thread_manager()->IsLockedByCurrentThread()) {
+ return;
+ }
+
+ Sampler* sampler = isolate->logger()->sampler();
+ if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent();
+ TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = Top::current_vm_state();
+ sample->state = isolate->current_vm_state();
sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
- active_sampler_->SampleStack(sample);
- active_sampler_->Tick(sample);
+ sampler->SampleStack(sample);
+ sampler->Tick(sample);
}
-
class Sampler::PlatformData : public Malloced {
public:
+ PlatformData() : vm_tid_(GetThreadID()) {}
+
+ pthread_t vm_tid() const { return vm_tid_; }
+
+ private:
+ pthread_t vm_tid_;
+};
+
+
+class SignalSender : public Thread {
+ public:
enum SleepInterval {
- FULL_INTERVAL,
- HALF_INTERVAL
+ HALF_INTERVAL,
+ FULL_INTERVAL
};
- explicit PlatformData(Sampler* sampler)
- : sampler_(sampler),
- signal_handler_installed_(false),
- vm_tgid_(getpid()),
- signal_sender_launched_(false) {
+ explicit SignalSender(int interval)
+ : Thread("SignalSender"),
+ interval_(interval) {}
+
+ static void InstallSignalHandler() {
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ signal_handler_installed_ =
+ (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+ }
+
+ static void RestoreSignalHandler() {
+ if (signal_handler_installed_) {
+ sigaction(SIGPROF, &old_signal_handler_, 0);
+ signal_handler_installed_ = false;
+ }
+ }
+
+ static void AddActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::AddActiveSampler(sampler);
+ if (instance_ == NULL) {
+ // Start a thread that will send SIGPROF signal to VM threads,
+ // when CPU profiling will be enabled.
+ instance_ = new SignalSender(sampler->interval());
+ instance_->Start();
+ } else {
+ ASSERT(instance_->interval_ == sampler->interval());
+ }
+ }
+
+ static void RemoveActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::RemoveActiveSampler(sampler);
+ if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+ RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
+ instance_->Join();
+ delete instance_;
+ instance_ = NULL;
+ RestoreSignalHandler();
+ }
}
- void SignalSender() {
- while (sampler_->IsActive()) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- if (sampler_->IsProfiling() && RuntimeProfiler::IsEnabled()) {
- SendProfilingSignal();
+ // Implement Thread::Run().
+ virtual void Run() {
+ SamplerRegistry::State state;
+ while ((state = SamplerRegistry::GetState()) !=
+ SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ if (cpu_profiling_enabled && !signal_handler_installed_) {
+ InstallSignalHandler();
+ } else if (!cpu_profiling_enabled && signal_handler_installed_) {
+ RestoreSignalHandler();
+ }
+
+ // When CPU profiling is enabled both JavaScript and C++ code is
+ // profiled. We must not suspend.
+ if (!cpu_profiling_enabled) {
+ if (rate_limiter_.SuspendIfNecessary()) continue;
+ }
+ if (cpu_profiling_enabled && runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
Sleep(HALF_INTERVAL);
- RuntimeProfiler::NotifyTick();
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
Sleep(HALF_INTERVAL);
} else {
- if (sampler_->IsProfiling()) SendProfilingSignal();
- if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
+ this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
+ NULL)) {
+ return;
+ }
+ }
Sleep(FULL_INTERVAL);
}
}
}
- void SendProfilingSignal() {
+ static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
+ if (!sampler->IsProfiling()) return;
+ SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
+ sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
+ }
+
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
+ void SendProfilingSignal(pthread_t tid) {
if (!signal_handler_installed_) return;
- pthread_kill(vm_tid_, SIGPROF);
+ pthread_kill(tid, SIGPROF);
}
void Sleep(SleepInterval full_or_half) {
// Convert ms to us and subtract 100 us to compensate delays
// occuring during signal delivery.
- useconds_t interval = sampler_->interval_ * 1000 - 100;
+ useconds_t interval = interval_ * 1000 - 100;
if (full_or_half == HALF_INTERVAL) interval /= 2;
int result = usleep(interval);
#ifdef DEBUG
@@ -673,22 +771,22 @@ class Sampler::PlatformData : public Malloced {
USE(result);
}
- Sampler* sampler_;
- bool signal_handler_installed_;
- struct sigaction old_signal_handler_;
- int vm_tgid_;
- bool signal_sender_launched_;
- pthread_t signal_sender_thread_;
+ const int interval_;
RuntimeProfilerRateLimiter rate_limiter_;
-};
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static SignalSender* instance_;
+ static bool signal_handler_installed_;
+ static struct sigaction old_signal_handler_;
-static void* SenderEntry(void* arg) {
- Sampler::PlatformData* data =
- reinterpret_cast<Sampler::PlatformData*>(arg);
- data->SignalSender();
- return 0;
-}
+ DISALLOW_COPY_AND_ASSIGN(SignalSender);
+};
+
+Mutex* SignalSender::mutex_ = OS::CreateMutex();
+SignalSender* SignalSender::instance_ = NULL;
+struct sigaction SignalSender::old_signal_handler_;
+bool SignalSender::signal_handler_installed_ = false;
Sampler::Sampler(Isolate* isolate, int interval)
@@ -697,63 +795,27 @@ Sampler::Sampler(Isolate* isolate, int interval)
profiling_(false),
active_(false),
samples_taken_(0) {
- data_ = new PlatformData(this);
+ data_ = new PlatformData;
}
Sampler::~Sampler() {
- ASSERT(!data_->signal_sender_launched_);
+ ASSERT(!IsActive());
delete data_;
}
void Sampler::Start() {
- // There can only be one active sampler at the time on POSIX
- // platforms.
ASSERT(!IsActive());
- vm_tid_ = GetThreadID();
-
- // Request profiling signals.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- data_->signal_handler_installed_ =
- sigaction(SIGPROF, &sa, &data_->old_signal_handler_) == 0;
-
- // Start a thread that sends SIGPROF signal to VM thread.
- // Sending the signal ourselves instead of relying on itimer provides
- // much better accuracy.
SetActive(true);
- if (pthread_create(
- &data_->signal_sender_thread_, NULL, SenderEntry, data_) == 0) {
- data_->signal_sender_launched_ = true;
- }
-
- // Set this sampler as the active sampler.
- active_sampler_ = this;
+ SignalSender::AddActiveSampler(this);
}
void Sampler::Stop() {
+ ASSERT(IsActive());
+ SignalSender::RemoveActiveSampler(this);
SetActive(false);
-
- // Wait for signal sender termination (it will exit after setting
- // active_ to false).
- if (data_->signal_sender_launched_) {
- Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
- pthread_join(data_->signal_sender_thread_, NULL);
- data_->signal_sender_launched_ = false;
- }
-
- // Restore old signal handler
- if (data_->signal_handler_installed_) {
- sigaction(SIGPROF, &data_->old_signal_handler_, 0);
- data_->signal_handler_installed_ = false;
- }
-
- // This sampler is no longer the active sampler.
- active_sampler_ = NULL;
}
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index cd38d1334..f18b3203e 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -203,7 +203,7 @@ void PrettyPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
Visit(node->try_block());
Print(" catch (");
const bool quote = false;
- PrintLiteral(node->name(), quote);
+ PrintLiteral(node->variable()->name(), quote);
Print(") ");
Visit(node->catch_block());
}
@@ -856,8 +856,9 @@ void AstPrinter::VisitForInStatement(ForInStatement* node) {
void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
IndentedScope indent(this, "TRY CATCH");
PrintIndentedVisit("TRY", node->try_block());
- const bool quote = false;
- PrintLiteralIndented("CATCHVAR", node->name(), quote);
+ PrintLiteralWithModeIndented("CATCHVAR",
+ node->variable(),
+ node->variable()->name());
PrintIndentedVisit("CATCH", node->catch_block());
}
@@ -1244,7 +1245,7 @@ void JsonAstBuilder::VisitForInStatement(ForInStatement* stmt) {
void JsonAstBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
TagScope tag(this, "TryCatchStatement");
{ AttributesScope attributes(this);
- AddAttribute("variable", stmt->name());
+ AddAttribute("variable", stmt->variable()->name());
}
Visit(stmt->try_block());
Visit(stmt->catch_block());
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index b2c9de852..34d7aa634 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -1635,7 +1635,8 @@ HeapObject *const V8HeapExplorer::kGcRootsObject =
V8HeapExplorer::V8HeapExplorer(
HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress)
- : snapshot_(snapshot),
+ : heap_(Isolate::Current()->heap()),
+ snapshot_(snapshot),
collection_(snapshot_->collection()),
progress_(progress),
filler_(NULL) {
@@ -1725,10 +1726,14 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
: "",
children_count,
retainers_count);
- } else if (object->IsFixedArray() || object->IsByteArray()) {
+ } else if (object->IsFixedArray() ||
+ object->IsFixedDoubleArray() ||
+ object->IsByteArray() ||
+ object->IsExternalArray()) {
+ const char* tag = objects_tags_.GetTag(object);
return AddEntry(object,
HeapEntry::kArray,
- "",
+ tag != NULL ? tag : "",
children_count,
retainers_count);
} else if (object->IsHeapNumber()) {
@@ -1836,15 +1841,13 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
HeapEntry* entry = GetEntry(obj);
if (entry == NULL) return; // No interest in this object.
+ bool extract_indexed_refs = true;
if (obj->IsJSGlobalProxy()) {
// We need to reference JS global objects from snapshot's root.
// We use JSGlobalProxy because this is what embedder (e.g. browser)
// uses for the global object.
JSGlobalProxy* proxy = JSGlobalProxy::cast(obj);
SetRootShortcutReference(proxy->map()->prototype());
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
} else if (obj->IsJSObject()) {
JSObject* js_obj = JSObject::cast(obj);
ExtractClosureReferences(js_obj, entry);
@@ -1852,7 +1855,7 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
ExtractElementReferences(js_obj, entry);
ExtractInternalReferences(js_obj, entry);
SetPropertyReference(
- obj, entry, HEAP->Proto_symbol(), js_obj->GetPrototype());
+ obj, entry, heap_->Proto_symbol(), js_obj->GetPrototype());
if (obj->IsJSFunction()) {
JSFunction* js_fun = JSFunction::cast(js_obj);
Object* proto_or_map = js_fun->prototype_or_initial_map();
@@ -1860,39 +1863,49 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
if (!proto_or_map->IsMap()) {
SetPropertyReference(
obj, entry,
- HEAP->prototype_symbol(), proto_or_map,
+ heap_->prototype_symbol(), proto_or_map,
JSFunction::kPrototypeOrInitialMapOffset);
} else {
SetPropertyReference(
obj, entry,
- HEAP->prototype_symbol(), js_fun->prototype());
+ heap_->prototype_symbol(), js_fun->prototype());
}
}
SetInternalReference(js_fun, entry,
"shared", js_fun->shared(),
JSFunction::kSharedFunctionInfoOffset);
+ TagObject(js_fun->unchecked_context(), "(context)");
SetInternalReference(js_fun, entry,
"context", js_fun->unchecked_context(),
JSFunction::kContextOffset);
+ TagObject(js_fun->literals(), "(function literals)");
SetInternalReference(js_fun, entry,
"literals", js_fun->literals(),
JSFunction::kLiteralsOffset);
}
+ TagObject(js_obj->properties(), "(object properties)");
SetInternalReference(obj, entry,
"properties", js_obj->properties(),
JSObject::kPropertiesOffset);
+ TagObject(js_obj->elements(), "(object elements)");
SetInternalReference(obj, entry,
"elements", js_obj->elements(),
JSObject::kElementsOffset);
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
} else if (obj->IsString()) {
if (obj->IsConsString()) {
ConsString* cs = ConsString::cast(obj);
SetInternalReference(obj, entry, 1, cs->first());
SetInternalReference(obj, entry, 2, cs->second());
}
+ extract_indexed_refs = false;
+ } else if (obj->IsGlobalContext()) {
+ Context* context = Context::cast(obj);
+ TagObject(context->jsfunction_result_caches(),
+ "(context func. result caches)");
+ TagObject(context->normalized_map_cache(), "(context norm. map cache)");
+ TagObject(context->runtime_context(), "(runtime context)");
+ TagObject(context->map_cache(), "(context map cache)");
+ TagObject(context->data(), "(context data)");
} else if (obj->IsMap()) {
Map* map = Map::cast(obj);
SetInternalReference(obj, entry,
@@ -1901,6 +1914,7 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
"constructor", map->constructor(),
Map::kConstructorOffset);
if (!map->instance_descriptors()->IsEmpty()) {
+ TagObject(map->instance_descriptors(), "(map descriptors)");
SetInternalReference(obj, entry,
"descriptors", map->instance_descriptors(),
Map::kInstanceDescriptorsOrBitField3Offset);
@@ -1908,9 +1922,6 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
SetInternalReference(obj, entry,
"code_cache", map->code_cache(),
Map::kCodeCacheOffset);
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
} else if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
SetInternalReference(obj, entry,
@@ -1919,16 +1930,61 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
SetInternalReference(obj, entry,
"code", shared->unchecked_code(),
SharedFunctionInfo::kCodeOffset);
+ TagObject(shared->scope_info(), "(function scope info)");
+ SetInternalReference(obj, entry,
+ "scope_info", shared->scope_info(),
+ SharedFunctionInfo::kScopeInfoOffset);
SetInternalReference(obj, entry,
"instance_class_name", shared->instance_class_name(),
SharedFunctionInfo::kInstanceClassNameOffset);
SetInternalReference(obj, entry,
"script", shared->script(),
SharedFunctionInfo::kScriptOffset);
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
- } else {
+ } else if (obj->IsScript()) {
+ Script* script = Script::cast(obj);
+ SetInternalReference(obj, entry,
+ "source", script->source(),
+ Script::kSourceOffset);
+ SetInternalReference(obj, entry,
+ "name", script->name(),
+ Script::kNameOffset);
+ SetInternalReference(obj, entry,
+ "data", script->data(),
+ Script::kDataOffset);
+ SetInternalReference(obj, entry,
+ "context_data", script->context_data(),
+ Script::kContextOffset);
+ TagObject(script->line_ends(), "(script line ends)");
+ SetInternalReference(obj, entry,
+ "line_ends", script->line_ends(),
+ Script::kLineEndsOffset);
+ } else if (obj->IsDescriptorArray()) {
+ DescriptorArray* desc_array = DescriptorArray::cast(obj);
+ if (desc_array->length() > DescriptorArray::kContentArrayIndex) {
+ Object* content_array =
+ desc_array->get(DescriptorArray::kContentArrayIndex);
+ TagObject(content_array, "(map descriptor content)");
+ SetInternalReference(obj, entry,
+ "content", content_array,
+ FixedArray::OffsetOfElementAt(
+ DescriptorArray::kContentArrayIndex));
+ }
+ } else if (obj->IsCodeCache()) {
+ CodeCache* code_cache = CodeCache::cast(obj);
+ TagObject(code_cache->default_cache(), "(default code cache)");
+ SetInternalReference(obj, entry,
+ "default_cache", code_cache->default_cache(),
+ CodeCache::kDefaultCacheOffset);
+ TagObject(code_cache->normal_type_cache(), "(code type cache)");
+ SetInternalReference(obj, entry,
+ "type_cache", code_cache->normal_type_cache(),
+ CodeCache::kNormalTypeCacheOffset);
+ } else if (obj->IsCode()) {
+ Code* code = Code::cast(obj);
+ TagObject(code->unchecked_relocation_info(), "(code relocation info)");
+ TagObject(code->unchecked_deoptimization_data(), "(code deopt data)");
+ }
+ if (extract_indexed_refs) {
SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
IndexedReferencesExtractor refs_extractor(this, obj, entry);
obj->Iterate(&refs_extractor);
@@ -2086,7 +2142,7 @@ bool V8HeapExplorer::IterateAndExtractReferences(
}
SetRootGcRootsReference();
RootsReferencesExtractor extractor(this);
- HEAP->IterateRoots(&extractor, VISIT_ALL);
+ heap_->IterateRoots(&extractor, VISIT_ALL);
filler_ = NULL;
return progress_->ProgressReport(false);
}
@@ -2241,6 +2297,18 @@ void V8HeapExplorer::SetGcRootsReference(Object* child_obj) {
}
+void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
+ if (obj->IsHeapObject() &&
+ !obj->IsOddball() &&
+ obj != heap_->raw_unchecked_empty_byte_array() &&
+ obj != heap_->raw_unchecked_empty_fixed_array() &&
+ obj != heap_->raw_unchecked_empty_fixed_double_array() &&
+ obj != heap_->raw_unchecked_empty_descriptor_array()) {
+ objects_tags_.SetTag(obj, tag);
+ }
+}
+
+
class GlobalObjectsEnumerator : public ObjectVisitor {
public:
virtual void VisitPointers(Object** start, Object** end) {
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index 6343d057c..3d0584b63 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -973,9 +973,11 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void SetRootShortcutReference(Object* child);
void SetRootGcRootsReference();
void SetGcRootsReference(Object* child);
+ void TagObject(Object* obj, const char* tag);
HeapEntry* GetEntry(Object* obj);
+ Heap* heap_;
HeapSnapshot* snapshot_;
HeapSnapshotsCollection* collection_;
SnapshottingProgressReportingInterface* progress_;
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index aa274d484..64d7b3684 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -218,7 +218,7 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
ASSERT(function != NULL);
Scope* scope = function->scope();
ASSERT(scope != NULL);
- if (scope->is_function_scope()) return true;
+ if (!scope->is_global_scope() && !scope->is_eval_scope()) return true;
ZoneList<Statement*>* body = function->body();
if (!body->is_empty()) {
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 816569a02..c0eaf9877 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -43,32 +43,6 @@ namespace v8 {
namespace internal {
-class PendingListNode : public Malloced {
- public:
- explicit PendingListNode(JSFunction* function);
- ~PendingListNode() { Destroy(); }
-
- PendingListNode* next() const { return next_; }
- void set_next(PendingListNode* node) { next_ = node; }
- Handle<JSFunction> function() { return Handle<JSFunction>::cast(function_); }
-
- // If the function is garbage collected before we've had the chance
- // to optimize it the weak handle will be null.
- bool IsValid() { return !function_.is_null(); }
-
- // Returns the number of microseconds this node has been pending.
- int Delay() const { return static_cast<int>(OS::Ticks() - start_); }
-
- private:
- void Destroy();
- static void WeakCallback(v8::Persistent<v8::Value> object, void* data);
-
- PendingListNode* next_;
- Handle<Object> function_; // Weak handle.
- int64_t start_;
-};
-
-
// Optimization sampler constants.
static const int kSamplerFrameCount = 2;
static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
@@ -80,33 +54,10 @@ static const int kSamplerThresholdMin = 1;
static const int kSamplerThresholdDelta = 1;
static const int kSamplerThresholdSizeFactorInit = 3;
-static const int kSamplerThresholdSizeFactorMin = 1;
-static const int kSamplerThresholdSizeFactorDelta = 1;
static const int kSizeLimit = 1500;
-PendingListNode::PendingListNode(JSFunction* function) : next_(NULL) {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- function_ = global_handles->Create(function);
- start_ = OS::Ticks();
- global_handles->MakeWeak(function_.location(), this, &WeakCallback);
-}
-
-
-void PendingListNode::Destroy() {
- if (!IsValid()) return;
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- global_handles->Destroy(function_.location());
- function_= Handle<Object>::null();
-}
-
-
-void PendingListNode::WeakCallback(v8::Persistent<v8::Value>, void* data) {
- reinterpret_cast<PendingListNode*>(data)->Destroy();
-}
-
-
Atomic32 RuntimeProfiler::state_ = 0;
// TODO(isolates): Create the semaphore lazily and clean it up when no
// longer required.
@@ -125,16 +76,8 @@ RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
sampler_threshold_(kSamplerThresholdInit),
sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
sampler_ticks_until_threshold_adjustment_(
- kSamplerTicksBetweenThresholdAdjustment),
- js_ratio_(0),
- sampler_window_position_(0),
- optimize_soon_list_(NULL),
- state_window_position_(0),
- state_window_ticks_(0) {
- state_counts_[IN_NON_JS_STATE] = kStateWindowSize;
- state_counts_[IN_JS_STATE] = 0;
- STATIC_ASSERT(IN_NON_JS_STATE == 0);
- memset(state_window_, 0, sizeof(state_window_));
+ kSamplerTicksBetweenThresholdAdjustment),
+ sampler_window_position_(0) {
ClearSampleBuffer();
}
@@ -148,16 +91,13 @@ void RuntimeProfiler::GlobalSetup() {
}
-void RuntimeProfiler::Optimize(JSFunction* function, bool eager, int delay) {
+void RuntimeProfiler::Optimize(JSFunction* function) {
ASSERT(function->IsOptimizable());
if (FLAG_trace_opt) {
- PrintF("[marking (%s) ", eager ? "eagerly" : "lazily");
+ PrintF("[marking ");
function->PrintName();
PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
PrintF(" for recompilation");
- if (delay > 0) {
- PrintF(" (delayed %0.3f ms)", static_cast<double>(delay) / 1000);
- }
PrintF("]\n");
}
@@ -243,20 +183,6 @@ void RuntimeProfiler::AddSample(JSFunction* function, int weight) {
void RuntimeProfiler::OptimizeNow() {
HandleScope scope(isolate_);
- PendingListNode* current = optimize_soon_list_;
- while (current != NULL) {
- PendingListNode* next = current->next();
- if (current->IsValid()) {
- Handle<JSFunction> function = current->function();
- int delay = current->Delay();
- if (function->IsOptimizable()) {
- Optimize(*function, true, delay);
- }
- }
- delete current;
- current = next;
- }
- optimize_soon_list_ = NULL;
// Run through the JavaScript frames and collect them. If we already
// have a sample of the function, we mark it for optimizations
@@ -303,24 +229,9 @@ void RuntimeProfiler::OptimizeNow() {
: 1;
int threshold = sampler_threshold_ * threshold_size_factor;
- int current_js_ratio = NoBarrier_Load(&js_ratio_);
-
- // Adjust threshold depending on the ratio of time spent
- // in JS code.
- if (current_js_ratio < 20) {
- // If we spend less than 20% of the time in JS code,
- // do not optimize.
- continue;
- } else if (current_js_ratio < 75) {
- // Below 75% of time spent in JS code, only optimize very
- // frequently used functions.
- threshold *= 3;
- }
if (LookupSample(function) >= threshold) {
- Optimize(function, false, 0);
- isolate_->compilation_cache()->MarkForEagerOptimizing(
- Handle<JSFunction>(function));
+ Optimize(function);
}
}
@@ -333,40 +244,8 @@ void RuntimeProfiler::OptimizeNow() {
}
-void RuntimeProfiler::OptimizeSoon(JSFunction* function) {
- if (!function->IsOptimizable()) return;
- PendingListNode* node = new PendingListNode(function);
- node->set_next(optimize_soon_list_);
- optimize_soon_list_ = node;
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void RuntimeProfiler::UpdateStateRatio(SamplerState current_state) {
- SamplerState old_state = state_window_[state_window_position_];
- state_counts_[old_state]--;
- state_window_[state_window_position_] = current_state;
- state_counts_[current_state]++;
- ASSERT(IsPowerOf2(kStateWindowSize));
- state_window_position_ = (state_window_position_ + 1) &
- (kStateWindowSize - 1);
- // Note: to calculate correct ratio we have to track how many valid
- // ticks are actually in the state window, because on profiler
- // startup this number can be less than the window size.
- state_window_ticks_ = Min(kStateWindowSize, state_window_ticks_ + 1);
- NoBarrier_Store(&js_ratio_, state_counts_[IN_JS_STATE] * 100 /
- state_window_ticks_);
-}
-#endif
-
-
void RuntimeProfiler::NotifyTick() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- // Record state sample.
- SamplerState state = IsSomeIsolateInJS()
- ? IN_JS_STATE
- : IN_NON_JS_STATE;
- UpdateStateRatio(state);
isolate_->stack_guard()->RequestRuntimeProfilerTick();
#endif
}
@@ -424,7 +303,6 @@ void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
// to get the right count of active isolates.
NoBarrier_AtomicIncrement(&state_, 1);
semaphore_->Signal();
- isolate->ResetEagerOptimizingData();
#endif
}
@@ -471,15 +349,8 @@ void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- static const int kNonJSTicksThreshold = 100;
- if (RuntimeProfiler::IsSomeIsolateInJS()) {
- non_js_ticks_ = 0;
- } else {
- if (non_js_ticks_ < kNonJSTicksThreshold) {
- ++non_js_ticks_;
- } else {
- return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
- }
+ if (!RuntimeProfiler::IsSomeIsolateInJS()) {
+ return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
}
#endif
return false;
diff --git a/deps/v8/src/runtime-profiler.h b/deps/v8/src/runtime-profiler.h
index 692b4ffac..cb05cf6b1 100644
--- a/deps/v8/src/runtime-profiler.h
+++ b/deps/v8/src/runtime-profiler.h
@@ -37,7 +37,6 @@ namespace internal {
class Isolate;
class JSFunction;
class Object;
-class PendingListNode;
class Semaphore;
class RuntimeProfiler {
@@ -52,7 +51,6 @@ class RuntimeProfiler {
}
void OptimizeNow();
- void OptimizeSoon(JSFunction* function);
void NotifyTick();
@@ -106,7 +104,7 @@ class RuntimeProfiler {
static void HandleWakeUp(Isolate* isolate);
- void Optimize(JSFunction* function, bool eager, int delay);
+ void Optimize(JSFunction* function);
void AttemptOnStackReplacement(JSFunction* function);
@@ -118,31 +116,16 @@ class RuntimeProfiler {
void AddSample(JSFunction* function, int weight);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- void UpdateStateRatio(SamplerState current_state);
-#endif
-
Isolate* isolate_;
int sampler_threshold_;
int sampler_threshold_size_factor_;
int sampler_ticks_until_threshold_adjustment_;
- // The ratio of ticks spent in JS code in percent.
- Atomic32 js_ratio_;
-
Object* sampler_window_[kSamplerWindowSize];
int sampler_window_position_;
int sampler_window_weight_[kSamplerWindowSize];
- // Support for pending 'optimize soon' requests.
- PendingListNode* optimize_soon_list_;
-
- SamplerState state_window_[kStateWindowSize];
- int state_window_position_;
- int state_window_ticks_;
- int state_counts_[2];
-
// Possible state values:
// -1 => the profiler thread is waiting on the semaphore
// 0 or positive => the number of isolates running JavaScript code.
@@ -159,7 +142,7 @@ class RuntimeProfiler {
// Rate limiter intended to be used in the profiler thread.
class RuntimeProfilerRateLimiter BASE_EMBEDDED {
public:
- RuntimeProfilerRateLimiter() : non_js_ticks_(0) { }
+ RuntimeProfilerRateLimiter() {}
// Suspends the current thread (which must be the profiler thread)
// when not executing JavaScript to minimize CPU usage. Returns
@@ -170,8 +153,6 @@ class RuntimeProfilerRateLimiter BASE_EMBEDDED {
bool SuspendIfNecessary();
private:
- int non_js_ticks_;
-
DISALLOW_COPY_AND_ASSIGN(RuntimeProfilerRateLimiter);
};
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index df99fdc67..5e96ef8e1 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -3918,15 +3918,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
if (proto->IsNull()) return *obj_value;
js_object = Handle<JSObject>::cast(proto);
}
- NormalizeElements(js_object);
- Handle<NumberDictionary> dictionary(js_object->element_dictionary());
+ Handle<NumberDictionary> dictionary = NormalizeElements(js_object);
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
PropertyDetails details = PropertyDetails(attr, NORMAL);
Handle<NumberDictionary> extended_dictionary =
NumberDictionarySet(dictionary, index, obj_value, details);
if (*extended_dictionary != *dictionary) {
- js_object->set_elements(*extended_dictionary);
+ if (js_object->GetElementsKind() ==
+ JSObject::NON_STRICT_ARGUMENTS_ELEMENTS) {
+ FixedArray::cast(js_object->elements())->set(1, *extended_dictionary);
+ } else {
+ js_object->set_elements(*extended_dictionary);
+ }
}
return *obj_value;
}
@@ -3981,8 +3985,7 @@ static MaybeObject* NormalizeObjectSetElement(Isolate* isolate,
Handle<Object> value,
PropertyAttributes attr) {
// Normalize the elements to enable attributes on the property.
- NormalizeElements(js_object);
- Handle<NumberDictionary> dictionary(js_object->element_dictionary());
+ Handle<NumberDictionary> dictionary = NormalizeElements(js_object);
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
PropertyDetails details = PropertyDetails(attr, NORMAL);
@@ -5742,6 +5745,27 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
}
+void FindAsciiStringIndices(Vector<const char> subject,
+ char pattern,
+ ZoneList<int>* indices,
+ unsigned int limit) {
+ ASSERT(limit > 0);
+ // Collect indices of pattern in subject using memchr.
+ // Stop after finding at most limit values.
+ const char* subject_start = reinterpret_cast<const char*>(subject.start());
+ const char* subject_end = subject_start + subject.length();
+ const char* pos = subject_start;
+ while (limit > 0) {
+ pos = reinterpret_cast<const char*>(
+ memchr(pos, pattern, subject_end - pos));
+ if (pos == NULL) return;
+ indices->Add(static_cast<int>(pos - subject_start));
+ pos++;
+ limit--;
+ }
+}
+
+
template <typename SubjectChar, typename PatternChar>
void FindStringIndices(Isolate* isolate,
Vector<const SubjectChar> subject,
@@ -5749,11 +5773,11 @@ void FindStringIndices(Isolate* isolate,
ZoneList<int>* indices,
unsigned int limit) {
ASSERT(limit > 0);
- // Collect indices of pattern in subject, and the end-of-string index.
+ // Collect indices of pattern in subject.
// Stop after finding at most limit values.
- StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
int pattern_length = pattern.length();
int index = 0;
+ StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
while (limit > 0) {
index = search.Search(subject, index);
if (index < 0) return;
@@ -5796,11 +5820,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
if (subject->IsAsciiRepresentation()) {
Vector<const char> subject_vector = subject->ToAsciiVector();
if (pattern->IsAsciiRepresentation()) {
- FindStringIndices(isolate,
- subject_vector,
- pattern->ToAsciiVector(),
- &indices,
- limit);
+ Vector<const char> pattern_vector = pattern->ToAsciiVector();
+ if (pattern_vector.length() == 1) {
+ FindAsciiStringIndices(subject_vector,
+ pattern_vector[0],
+ &indices,
+ limit);
+ } else {
+ FindStringIndices(isolate,
+ subject_vector,
+ pattern_vector,
+ &indices,
+ limit);
+ }
} else {
FindStringIndices(isolate,
subject_vector,
@@ -7821,7 +7853,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
}
}
- isolate->compilation_cache()->MarkForLazyOptimizing(function);
if (type == Deoptimizer::EAGER) {
RUNTIME_ASSERT(function->IsOptimized());
} else {
@@ -9938,7 +9969,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameCount) {
// If there is no JavaScript stack frame count is 0.
return Smi::FromInt(0);
}
- for (JavaScriptFrameIterator it(isolate, id); !it.done(); it.Advance()) n++;
+
+ for (JavaScriptFrameIterator it(isolate, id); !it.done(); it.Advance()) {
+ n += it.frame()->GetInlineCount();
+ }
return Smi::FromInt(n);
}
@@ -9951,7 +9985,7 @@ static const int kFrameDetailsLocalCountIndex = 4;
static const int kFrameDetailsSourcePositionIndex = 5;
static const int kFrameDetailsConstructCallIndex = 6;
static const int kFrameDetailsAtReturnIndex = 7;
-static const int kFrameDetailsDebuggerFrameIndex = 8;
+static const int kFrameDetailsFlagsIndex = 8;
static const int kFrameDetailsFirstDynamicIndex = 9;
// Return an array with frame details
@@ -9967,7 +10001,7 @@ static const int kFrameDetailsFirstDynamicIndex = 9;
// 5: Source position
// 6: Constructor call
// 7: Is at return
-// 8: Debugger frame
+// 8: Flags
// Arguments name, value
// Locals name, value
// Return value if any
@@ -9990,16 +10024,26 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// If there are no JavaScript stack frames return undefined.
return heap->undefined_value();
}
+
+ int deoptimized_frame_index = -1; // Frame index in optimized frame.
+ DeoptimizedFrameInfo* deoptimized_frame = NULL;
+
int count = 0;
JavaScriptFrameIterator it(isolate, id);
for (; !it.done(); it.Advance()) {
- if (count == index) break;
- count++;
+ if (index < count + it.frame()->GetInlineCount()) break;
+ count += it.frame()->GetInlineCount();
}
if (it.done()) return heap->undefined_value();
- bool is_optimized_frame =
- it.frame()->LookupCode()->kind() == Code::OPTIMIZED_FUNCTION;
+ if (it.frame()->is_optimized()) {
+ deoptimized_frame_index =
+ it.frame()->GetInlineCount() - (index - count) - 1;
+ deoptimized_frame = Deoptimizer::DebuggerInspectableFrame(
+ it.frame(),
+ deoptimized_frame_index,
+ isolate);
+ }
// Traverse the saved contexts chain to find the active context for the
// selected frame.
@@ -10022,6 +10066,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Get scope info and read from it for local variable information.
Handle<JSFunction> function(JSFunction::cast(it.frame()->function()));
Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
+ ASSERT(*scope_info != SerializedScopeInfo::Empty());
ScopeInfo<> info(*scope_info);
// Get the locals names and values into a temporary array.
@@ -10033,23 +10078,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
isolate->factory()->NewFixedArray(info.NumberOfLocals() * 2);
// Fill in the values of the locals.
- if (is_optimized_frame) {
- // If we are inspecting an optimized frame use undefined as the
- // value for all locals.
- //
- // TODO(1140): We should be able to get the correct values
- // for locals in optimized frames.
- for (int i = 0; i < info.NumberOfLocals(); i++) {
- locals->set(i * 2, *info.LocalName(i));
- locals->set(i * 2 + 1, isolate->heap()->undefined_value());
- }
- } else {
- int i = 0;
- for (; i < info.number_of_stack_slots(); ++i) {
- // Use the value from the stack.
- locals->set(i * 2, *info.LocalName(i));
+ int i = 0;
+ for (; i < info.number_of_stack_slots(); ++i) {
+ // Use the value from the stack.
+ locals->set(i * 2, *info.LocalName(i));
+ if (it.frame()->is_optimized()) {
+ // Get the value from the deoptimized frame.
+ locals->set(i * 2 + 1,
+ deoptimized_frame->GetExpression(i));
+ } else {
+ // Get the value from the stack.
locals->set(i * 2 + 1, it.frame()->GetExpression(i));
}
+ }
+ if (i < info.NumberOfLocals()) {
// Get the context containing declarations.
Handle<Context> context(
Context::cast(it.frame()->context())->declaration_context());
@@ -10064,7 +10106,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Check whether this frame is positioned at return. If not top
// frame or if the frame is optimized it cannot be at a return.
bool at_return = false;
- if (!is_optimized_frame && index == 0) {
+ if (!it.frame()->is_optimized() && index == 0) {
at_return = isolate->debug()->IsBreakAtReturn(it.frame());
}
@@ -10145,10 +10187,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Add the at return information.
details->set(kFrameDetailsAtReturnIndex, heap->ToBoolean(at_return));
- // Add information on whether this frame is invoked in the debugger context.
- details->set(kFrameDetailsDebuggerFrameIndex,
- heap->ToBoolean(*save->context() ==
- *isolate->debug()->debug_context()));
+ // Add flags to indicate information on whether this frame is
+ // bit 0: invoked in the debugger context.
+ // bit 1: optimized frame.
+ // bit 2: inlined in optimized frame
+ int flags = 0;
+ if (*save->context() == *isolate->debug()->debug_context()) {
+ flags |= 1 << 0;
+ }
+ if (it.frame()->is_optimized()) {
+ flags |= 1 << 1;
+ if (deoptimized_frame_index > 0) {
+ flags |= 1 << 2;
+ }
+ }
+ details->set(kFrameDetailsFlagsIndex, Smi::FromInt(flags));
// Fill the dynamic part.
int details_index = kFrameDetailsFirstDynamicIndex;
@@ -10167,7 +10220,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
//
// TODO(3141533): We should be able to get the actual parameter
// value for optimized frames.
- if (!is_optimized_frame &&
+ if (!it.frame()->is_optimized() &&
(i < it.frame()->ComputeParametersCount())) {
details->set(details_index++, it.frame()->GetParameter(i));
} else {
@@ -10203,6 +10256,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
}
details->set(kFrameDetailsReceiverIndex, *receiver);
+ // Get rid of the calculated deoptimized frame if any.
+ if (deoptimized_frame != NULL) {
+ Deoptimizer::DeleteDebuggerInspectableFrame(deoptimized_frame,
+ isolate);
+ }
+
ASSERT_EQ(details_size, details_index);
return *isolate->factory()->NewJSArrayWithElements(details);
}
@@ -10263,7 +10322,7 @@ static Handle<JSObject> MaterializeLocalScope(Isolate* isolate,
}
// Second fill all stack locals.
- for (int i = 0; i < scope_info.number_of_stack_slots(); i++) {
+ for (int i = 0; i < scope_info.number_of_stack_slots(); ++i) {
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
SetProperty(local_scope,
@@ -10274,37 +10333,40 @@ static Handle<JSObject> MaterializeLocalScope(Isolate* isolate,
Handle<JSObject>());
}
- // Third fill all context locals.
- Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context(frame_context->declaration_context());
- if (!CopyContextLocalsToScopeObject(isolate,
- serialized_scope_info, scope_info,
- function_context, local_scope)) {
- return Handle<JSObject>();
- }
-
- // Finally copy any properties from the function context extension. This will
- // be variables introduced by eval.
- if (function_context->closure() == *function) {
- if (function_context->has_extension() &&
- !function_context->IsGlobalContext()) {
- Handle<JSObject> ext(JSObject::cast(function_context->extension()));
- Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
- for (int i = 0; i < keys->length(); i++) {
- // Names of variables introduced by eval are strings.
- ASSERT(keys->get(i)->IsString());
- Handle<String> key(String::cast(keys->get(i)));
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(local_scope,
- key,
- GetProperty(ext, key),
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
+ if (scope_info.number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
+ // Third fill all context locals.
+ Handle<Context> frame_context(Context::cast(frame->context()));
+ Handle<Context> function_context(frame_context->declaration_context());
+ if (!CopyContextLocalsToScopeObject(isolate,
+ serialized_scope_info, scope_info,
+ function_context, local_scope)) {
+ return Handle<JSObject>();
+ }
+
+ // Finally copy any properties from the function context extension.
+ // These will be variables introduced by eval.
+ if (function_context->closure() == *function) {
+ if (function_context->has_extension() &&
+ !function_context->IsGlobalContext()) {
+ Handle<JSObject> ext(JSObject::cast(function_context->extension()));
+ Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
+ for (int i = 0; i < keys->length(); i++) {
+ // Names of variables introduced by eval are strings.
+ ASSERT(keys->get(i)->IsString());
+ Handle<String> key(String::cast(keys->get(i)));
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
+ SetProperty(local_scope,
+ key,
+ GetProperty(ext, key),
+ NONE,
+ kNonStrictMode),
+ Handle<JSObject>());
+ }
}
}
}
+
return local_scope;
}
@@ -12074,22 +12136,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SummarizeLOL) {
#ifdef ENABLE_LOGGING_AND_PROFILING
RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerResume) {
NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_CHECKED(Smi, smi_modules, args[0]);
- CONVERT_CHECKED(Smi, smi_tag, args[1]);
- v8::V8::ResumeProfilerEx(smi_modules->value(), smi_tag->value());
+ v8::V8::ResumeProfiler();
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerPause) {
NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_CHECKED(Smi, smi_modules, args[0]);
- CONVERT_CHECKED(Smi, smi_tag, args[1]);
- v8::V8::PauseProfilerEx(smi_modules->value(), smi_tag->value());
+ v8::V8::PauseProfiler();
return isolate->heap()->undefined_value();
}
@@ -12451,6 +12505,28 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IS_VAR) {
}
+#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
+ RUNTIME_FUNCTION(MaybeObject*, Runtime_Has##Name) { \
+ CONVERT_CHECKED(JSObject, obj, args[0]); \
+ return isolate->heap()->ToBoolean(obj->Has##Name()); \
+ }
+
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalPixelElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalArrayElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalByteElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedByteElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalShortElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedShortElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalIntElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedIntElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalFloatElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalDoubleElements)
+
+#undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
+
// ----------------------------------------------------------------------------
// Implementation of Runtime
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index 5efc057df..0900fd360 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -334,7 +334,23 @@ namespace internal {
F(MessageGetScript, 1, 1) \
\
/* Pseudo functions - handled as macros by parser */ \
- F(IS_VAR, 1, 1)
+ F(IS_VAR, 1, 1) \
+ \
+ /* expose boolean functions from objects-inl.h */ \
+ F(HasFastElements, 1, 1) \
+ F(HasFastDoubleElements, 1, 1) \
+ F(HasDictionaryElements, 1, 1) \
+ F(HasExternalPixelElements, 1, 1) \
+ F(HasExternalArrayElements, 1, 1) \
+ F(HasExternalByteElements, 1, 1) \
+ F(HasExternalUnsignedByteElements, 1, 1) \
+ F(HasExternalShortElements, 1, 1) \
+ F(HasExternalUnsignedShortElements, 1, 1) \
+ F(HasExternalIntElements, 1, 1) \
+ F(HasExternalUnsignedIntElements, 1, 1) \
+ F(HasExternalFloatElements, 1, 1) \
+ F(HasExternalDoubleElements, 1, 1)
+
#ifdef ENABLE_DEBUGGER_SUPPORT
#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
@@ -413,8 +429,8 @@ namespace internal {
#ifdef ENABLE_LOGGING_AND_PROFILING
#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F) \
- F(ProfilerResume, 2, 1) \
- F(ProfilerPause, 2, 1)
+ F(ProfilerResume, 0, 1) \
+ F(ProfilerPause, 0, 1)
#else
#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F)
#endif
@@ -470,7 +486,8 @@ namespace internal {
F(IsRegExpEquivalent, 2, 1) \
F(HasCachedArrayIndex, 1, 1) \
F(GetCachedArrayIndex, 1, 1) \
- F(FastAsciiArrayJoin, 2, 1)
+ F(FastAsciiArrayJoin, 2, 1) \
+ F(IsNativeOrStrictMode, 1, 1)
// ----------------------------------------------------------------------------
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index 74d0c2a2e..5546875c4 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -119,9 +119,9 @@ Scope::Scope(Type type)
temps_(0),
params_(0),
unresolved_(0),
- decls_(0) {
+ decls_(0),
+ already_resolved_(false) {
SetDefaults(type, NULL, Handle<SerializedScopeInfo>::null());
- ASSERT(!resolved());
}
@@ -131,14 +131,14 @@ Scope::Scope(Scope* outer_scope, Type type)
temps_(4),
params_(4),
unresolved_(16),
- decls_(4) {
+ decls_(4),
+ already_resolved_(false) {
SetDefaults(type, outer_scope, Handle<SerializedScopeInfo>::null());
// At some point we might want to provide outer scopes to
// eval scopes (by walking the stack and reading the scope info).
// In that case, the ASSERT below needs to be adjusted.
ASSERT((type == GLOBAL_SCOPE || type == EVAL_SCOPE) == (outer_scope == NULL));
ASSERT(!HasIllegalRedeclaration());
- ASSERT(!resolved());
}
@@ -148,15 +148,34 @@ Scope::Scope(Scope* inner_scope, Handle<SerializedScopeInfo> scope_info)
temps_(4),
params_(4),
unresolved_(16),
- decls_(4) {
+ decls_(4),
+ already_resolved_(true) {
ASSERT(!scope_info.is_null());
SetDefaults(FUNCTION_SCOPE, NULL, scope_info);
- ASSERT(resolved());
if (scope_info->HasHeapAllocatedLocals()) {
num_heap_slots_ = scope_info_->NumberOfContextSlots();
}
+ AddInnerScope(inner_scope);
+}
+
+Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name)
+ : inner_scopes_(1),
+ variables_(),
+ temps_(0),
+ params_(0),
+ unresolved_(0),
+ decls_(0),
+ already_resolved_(true) {
+ SetDefaults(CATCH_SCOPE, NULL, Handle<SerializedScopeInfo>::null());
AddInnerScope(inner_scope);
+ ++num_var_or_const_;
+ Variable* variable = variables_.Declare(this,
+ catch_variable_name,
+ Variable::VAR,
+ true, // Valid left-hand side.
+ Variable::NORMAL);
+ AllocateHeapSlot(variable);
}
@@ -190,30 +209,43 @@ void Scope::SetDefaults(Type type,
Scope* Scope::DeserializeScopeChain(CompilationInfo* info,
Scope* global_scope) {
+ // Reconstruct the outer scope chain from a closure's context chain.
ASSERT(!info->closure().is_null());
- // If we have a serialized scope info, reuse it.
+ Context* context = info->closure()->context();
+ Scope* current_scope = NULL;
Scope* innermost_scope = NULL;
- Scope* scope = NULL;
-
- SerializedScopeInfo* scope_info = info->closure()->shared()->scope_info();
- if (scope_info != SerializedScopeInfo::Empty()) {
- JSFunction* current = *info->closure();
- do {
- current = current->context()->closure();
- Handle<SerializedScopeInfo> scope_info(current->shared()->scope_info());
- if (*scope_info != SerializedScopeInfo::Empty()) {
- scope = new Scope(scope, scope_info);
- if (innermost_scope == NULL) innermost_scope = scope;
+ bool contains_with = false;
+ while (!context->IsGlobalContext()) {
+ if (context->IsWithContext()) {
+ // All the inner scopes are inside a with.
+ contains_with = true;
+ for (Scope* s = innermost_scope; s != NULL; s = s->outer_scope()) {
+ s->scope_inside_with_ = true;
+ }
+ } else {
+ if (context->IsFunctionContext()) {
+ SerializedScopeInfo* scope_info =
+ context->closure()->shared()->scope_info();
+ current_scope =
+ new Scope(current_scope, Handle<SerializedScopeInfo>(scope_info));
} else {
- ASSERT(current->context()->IsGlobalContext());
+ ASSERT(context->IsCatchContext());
+ String* name = String::cast(context->extension());
+ current_scope = new Scope(current_scope, Handle<String>(name));
}
- } while (!current->context()->IsGlobalContext());
- }
+ if (contains_with) current_scope->RecordWithStatement();
+ if (innermost_scope == NULL) innermost_scope = current_scope;
+ }
- global_scope->AddInnerScope(scope);
- if (innermost_scope == NULL) innermost_scope = global_scope;
+ // Forget about a with when we move to a context for a different function.
+ if (context->previous()->closure() != context->closure()) {
+ contains_with = false;
+ }
+ context = context->previous();
+ }
- return innermost_scope;
+ global_scope->AddInnerScope(current_scope);
+ return (innermost_scope == NULL) ? global_scope : innermost_scope;
}
@@ -238,7 +270,7 @@ bool Scope::Analyze(CompilationInfo* info) {
void Scope::Initialize(bool inside_with) {
- ASSERT(!resolved());
+ ASSERT(!already_resolved());
// Add this scope as a new inner scope of the outer scope.
if (outer_scope_ != NULL) {
@@ -256,11 +288,16 @@ void Scope::Initialize(bool inside_with) {
// instead load them directly from the stack. Currently, the only
// such parameter is 'this' which is passed on the stack when
// invoking scripts
- Variable* var =
- variables_.Declare(this, FACTORY->this_symbol(), Variable::VAR,
- false, Variable::THIS);
- var->set_rewrite(new Slot(var, Slot::PARAMETER, -1));
- receiver_ = var;
+ if (is_catch_scope()) {
+ ASSERT(outer_scope() != NULL);
+ receiver_ = outer_scope()->receiver();
+ } else {
+ Variable* var =
+ variables_.Declare(this, FACTORY->this_symbol(), Variable::VAR,
+ false, Variable::THIS);
+ var->set_rewrite(new Slot(var, Slot::PARAMETER, -1));
+ receiver_ = var;
+ }
if (is_function_scope()) {
// Declare 'arguments' variable which exists in all functions.
@@ -274,11 +311,10 @@ void Scope::Initialize(bool inside_with) {
Variable* Scope::LocalLookup(Handle<String> name) {
Variable* result = variables_.Lookup(name);
- if (result != NULL || !resolved()) {
+ if (result != NULL || scope_info_.is_null()) {
return result;
}
- // If the scope is resolved, we can find a variable in serialized scope
- // info.
+ // If we have a serialized scope info, we might find the variable there.
//
// We should never lookup 'arguments' in this scope as it is implicitly
// present in every scope.
@@ -326,7 +362,7 @@ Variable* Scope::DeclareFunctionVar(Handle<String> name) {
void Scope::DeclareParameter(Handle<String> name) {
- ASSERT(!resolved());
+ ASSERT(!already_resolved());
ASSERT(is_function_scope());
Variable* var =
variables_.Declare(this, name, Variable::VAR, true, Variable::NORMAL);
@@ -335,7 +371,7 @@ void Scope::DeclareParameter(Handle<String> name) {
Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) {
- ASSERT(!resolved());
+ ASSERT(!already_resolved());
// This function handles VAR and CONST modes. DYNAMIC variables are
// introduces during variable allocation, INTERNAL variables are allocated
// explicitly, and TEMPORARY variables are allocated via NewTemporary().
@@ -358,7 +394,7 @@ VariableProxy* Scope::NewUnresolved(Handle<String> name,
// Note that we must not share the unresolved variables with
// the same name because they may be removed selectively via
// RemoveUnresolved().
- ASSERT(!resolved());
+ ASSERT(!already_resolved());
VariableProxy* proxy = new VariableProxy(name, false, inside_with, position);
unresolved_.Add(proxy);
return proxy;
@@ -378,7 +414,7 @@ void Scope::RemoveUnresolved(VariableProxy* var) {
Variable* Scope::NewTemporary(Handle<String> name) {
- ASSERT(!resolved());
+ ASSERT(!already_resolved());
Variable* var =
new Variable(this, name, Variable::TEMPORARY, true, Variable::NORMAL);
temps_.Add(var);
@@ -508,12 +544,22 @@ int Scope::ContextChainLength(Scope* scope) {
}
+Scope* Scope::DeclarationScope() {
+ Scope* scope = this;
+ while (scope->is_catch_scope()) {
+ scope = scope->outer_scope();
+ }
+ return scope;
+}
+
+
#ifdef DEBUG
static const char* Header(Scope::Type type) {
switch (type) {
case Scope::EVAL_SCOPE: return "eval";
case Scope::FUNCTION_SCOPE: return "function";
case Scope::GLOBAL_SCOPE: return "global";
+ case Scope::CATCH_SCOPE: return "catch";
}
UNREACHABLE();
return NULL;
@@ -864,8 +910,10 @@ bool Scope::MustAllocate(Variable* var) {
// visible name.
if ((var->is_this() || var->name()->length() > 0) &&
(var->is_accessed_from_inner_scope() ||
- scope_calls_eval_ || inner_scope_calls_eval_ ||
- scope_contains_with_)) {
+ scope_calls_eval_ ||
+ inner_scope_calls_eval_ ||
+ scope_contains_with_ ||
+ is_catch_scope())) {
var->set_is_used(true);
}
// Global variables do not need to be allocated.
@@ -874,16 +922,20 @@ bool Scope::MustAllocate(Variable* var) {
bool Scope::MustAllocateInContext(Variable* var) {
- // If var is accessed from an inner scope, or if there is a
- // possibility that it might be accessed from the current or an inner
- // scope (through an eval() call), it must be allocated in the
- // context. Exception: temporary variables are not allocated in the
+ // If var is accessed from an inner scope, or if there is a possibility
+ // that it might be accessed from the current or an inner scope (through
+ // an eval() call or a runtime with lookup), it must be allocated in the
// context.
- return
- var->mode() != Variable::TEMPORARY &&
- (var->is_accessed_from_inner_scope() ||
- scope_calls_eval_ || inner_scope_calls_eval_ ||
- scope_contains_with_ || var->is_global());
+ //
+ // Exceptions: temporary variables are never allocated in a context;
+ // catch-bound variables are always allocated in a context.
+ if (var->mode() == Variable::TEMPORARY) return false;
+ if (is_catch_scope()) return true;
+ return var->is_accessed_from_inner_scope() ||
+ scope_calls_eval_ ||
+ inner_scope_calls_eval_ ||
+ scope_contains_with_ ||
+ var->is_global();
}
@@ -1010,7 +1062,7 @@ void Scope::AllocateVariablesRecursively() {
// If scope is already resolved, we still need to allocate
// variables in inner scopes which might not had been resolved yet.
- if (resolved()) return;
+ if (already_resolved()) return;
// The number of slots required for variables.
num_stack_slots_ = 0;
num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index d4e8e2bd9..a493d5752 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -90,9 +90,10 @@ class Scope: public ZoneObject {
// Construction
enum Type {
- EVAL_SCOPE, // the top-level scope for an 'eval' source
- FUNCTION_SCOPE, // the top-level scope for a function
- GLOBAL_SCOPE // the top-level scope for a program or a top-level eval
+ EVAL_SCOPE, // The top-level scope for an eval source.
+ FUNCTION_SCOPE, // The top-level scope for a function.
+ GLOBAL_SCOPE, // The top-level scope for a program or a top-level eval.
+ CATCH_SCOPE // The scope introduced by catch.
};
Scope(Scope* outer_scope, Type type);
@@ -202,6 +203,7 @@ class Scope: public ZoneObject {
bool is_eval_scope() const { return type_ == EVAL_SCOPE; }
bool is_function_scope() const { return type_ == FUNCTION_SCOPE; }
bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
+ bool is_catch_scope() const { return type_ == CATCH_SCOPE; }
bool is_strict_mode() const { return strict_mode_; }
bool is_strict_mode_eval_scope() const {
return is_eval_scope() && is_strict_mode();
@@ -225,13 +227,8 @@ class Scope: public ZoneObject {
// ---------------------------------------------------------------------------
// Accessors.
- // A new variable proxy corresponding to the (function) receiver.
- VariableProxy* receiver() const {
- VariableProxy* proxy =
- new VariableProxy(FACTORY->this_symbol(), true, false);
- proxy->BindTo(receiver_);
- return proxy;
- }
+ // The variable corresponding the 'this' value.
+ Variable* receiver() { return receiver_; }
// The variable holding the function literal for named function
// literals, or NULL.
@@ -293,6 +290,10 @@ class Scope: public ZoneObject {
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope);
+ // Find the first function, global, or eval scope. This is the scope
+ // where var declarations will be hoisted to in the implementation.
+ Scope* DeclarationScope();
+
// ---------------------------------------------------------------------------
// Strict mode support.
bool IsDeclared(Handle<String> name) {
@@ -367,6 +368,10 @@ class Scope: public ZoneObject {
bool outer_scope_is_eval_scope_;
bool force_eager_compilation_;
+ // True if it doesn't need scope resolution (e.g., if the scope was
+ // constructed based on a serialized scope info or a catch context).
+ bool already_resolved_;
+
// Computed as variables are declared.
int num_var_or_const_;
@@ -376,7 +381,7 @@ class Scope: public ZoneObject {
// Serialized scopes support.
Handle<SerializedScopeInfo> scope_info_;
- bool resolved() { return !scope_info_.is_null(); }
+ bool already_resolved() { return already_resolved_; }
// Create a non-local variable with a given name.
// These variables are looked up dynamically at runtime.
@@ -412,8 +417,12 @@ class Scope: public ZoneObject {
void AllocateVariablesRecursively();
private:
+ // Construct a function scope based on the scope info.
Scope(Scope* inner_scope, Handle<SerializedScopeInfo> scope_info);
+ // Construct a catch scope with a binding for the name.
+ Scope(Scope* inner_scope, Handle<String> catch_variable_name);
+
void AddInnerScope(Scope* inner_scope) {
if (inner_scope != NULL) {
inner_scopes_.Add(inner_scope);
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index bed211a03..01224e796 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -251,7 +251,9 @@ function StringReplace(search, replace) {
// Compute the string to replace with.
if (IS_FUNCTION(replace)) {
- builder.add(%_CallFunction(%GetGlobalReceiver(),
+ var receiver =
+ %_IsNativeOrStrictMode(replace) ? void 0 : %GetGlobalReceiver();
+ builder.add(%_CallFunction(receiver,
search,
start,
subject,
@@ -418,7 +420,8 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
var match_start = 0;
var override = new InternalArray(null, 0, subject);
- var receiver = %GetGlobalReceiver();
+ var receiver =
+ %_IsNativeOrStrictMode(replace) ? void 0 : %GetGlobalReceiver();
while (i < len) {
var elem = res[i];
if (%_IsSmi(elem)) {
@@ -475,8 +478,10 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
// No captures, only the match, which is always valid.
var s = SubString(subject, index, endOfMatch);
// Don't call directly to avoid exposing the built-in global object.
+ var receiver =
+ %_IsNativeOrStrictMode(replace) ? void 0 : %GetGlobalReceiver();
replacement =
- %_CallFunction(%GetGlobalReceiver(), s, index, subject, replace);
+ %_CallFunction(receiver, s, index, subject, replace);
} else {
var parameters = new InternalArray(m + 2);
for (var j = 0; j < m; j++) {
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index d5392d95b..eb813814d 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -1694,6 +1694,8 @@ MaybeObject* KeyedLoadStubCompiler::ComputeSharedKeyedLoadElementStub(
} else if (receiver_map->has_external_array_elements()) {
JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
maybe_stub = KeyedLoadExternalArrayStub(elements_kind).TryGetCode();
+ } else if (receiver_map->has_dictionary_elements()) {
+ maybe_stub = isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Slow);
} else {
UNREACHABLE();
}
@@ -1746,6 +1748,8 @@ MaybeObject* KeyedStoreStubCompiler::ComputeSharedKeyedStoreElementStub(
} else if (receiver_map->has_external_array_elements()) {
JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
maybe_stub = KeyedStoreExternalArrayStub(elements_kind).TryGetCode();
+ } else if (receiver_map->has_dictionary_elements()) {
+ maybe_stub = isolate()->builtins()->builtin(Builtins::kKeyedStoreIC_Slow);
} else {
UNREACHABLE();
}
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 4c34ff8fb..defb1ae96 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -122,6 +122,7 @@ bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) {
Builtins* builtins = Isolate::Current()->builtins();
return code->is_keyed_store_stub() &&
*code != builtins->builtin(Builtins::kKeyedStoreIC_Generic) &&
+ *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic_Strict) &&
code->ic_state() == MEGAMORPHIC;
}
return false;
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index 17e67016c..2de830300 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -170,14 +170,10 @@ namespace internal {
SC(named_load_inline_field, V8.NamedLoadInlineFast) \
SC(keyed_load_inline_generic, V8.KeyedLoadInlineGeneric) \
SC(keyed_load_inline_fast, V8.KeyedLoadInlineFast) \
- SC(named_load_full, V8.NamedLoadFull) \
- SC(keyed_load_full, V8.KeyedLoadFull) \
SC(keyed_store_inline_generic, V8.KeyedStoreInlineGeneric) \
SC(keyed_store_inline_fast, V8.KeyedStoreInlineFast) \
SC(named_store_inline_generic, V8.NamedStoreInlineGeneric) \
SC(named_store_inline_fast, V8.NamedStoreInlineFast) \
- SC(keyed_store_full, V8.KeyedStoreFull) \
- SC(named_store_full, V8.NamedStoreFull) \
SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 0b562fc28..11af057b1 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -100,42 +100,34 @@ void V8::TearDown() {
}
-static uint32_t random_seed() {
- if (FLAG_random_seed == 0) {
- return random();
+static void seed_random(uint32_t* state) {
+ for (int i = 0; i < 2; ++i) {
+ state[i] = FLAG_random_seed;
+ while (state[i] == 0) {
+ state[i] = random();
+ }
}
- return FLAG_random_seed;
}
-typedef struct {
- uint32_t hi;
- uint32_t lo;
-} random_state;
+// Random number generator using George Marsaglia's MWC algorithm.
+static uint32_t random_base(uint32_t* state) {
+ // Initialize seed using the system random().
+ // No non-zero seed will ever become zero again.
+ if (state[0] == 0) seed_random(state);
+ // Mix the bits. Never replaces state[i] with 0 if it is nonzero.
+ state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
+ state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
-// Random number generator using George Marsaglia's MWC algorithm.
-static uint32_t random_base(random_state *state) {
- // Initialize seed using the system random(). If one of the seeds
- // should ever become zero again, or if random() returns zero, we
- // avoid getting stuck with zero bits in hi or lo by re-initializing
- // them on demand.
- if (state->hi == 0) state->hi = random_seed();
- if (state->lo == 0) state->lo = random_seed();
-
- // Mix the bits.
- state->hi = 36969 * (state->hi & 0xFFFF) + (state->hi >> 16);
- state->lo = 18273 * (state->lo & 0xFFFF) + (state->lo >> 16);
- return (state->hi << 16) + (state->lo & 0xFFFF);
+ return (state[0] << 14) + (state[1] & 0x3FFFF);
}
// Used by JavaScript APIs
uint32_t V8::Random(Isolate* isolate) {
ASSERT(isolate == Isolate::Current());
- // TODO(isolates): move lo and hi to isolate
- static random_state state = {0, 0};
- return random_base(&state);
+ return random_base(isolate->random_seed());
}
@@ -144,9 +136,7 @@ uint32_t V8::Random(Isolate* isolate) {
// leaks that could be used in an exploit.
uint32_t V8::RandomPrivate(Isolate* isolate) {
ASSERT(isolate == Isolate::Current());
- // TODO(isolates): move lo and hi to isolate
- static random_state state = {0, 0};
- return random_base(&state);
+ return random_base(isolate->private_random_seed());
}
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index be8d453d4..ed9b5e316 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 4
-#define BUILD_NUMBER 8
+#define BUILD_NUMBER 9
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 2971db845..4c1290414 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -1168,7 +1168,7 @@ class Assembler : public AssemblerBase {
// Call near relative 32-bit displacement, relative to next instruction.
void call(Label* L);
void call(Handle<Code> target,
- RelocInfo::Mode rmode,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId);
// Calls directly to the given address using a relative offset.
@@ -1350,7 +1350,9 @@ class Assembler : public AssemblerBase {
void Print();
// Check the code size generated from label to here.
- int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); }
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
// Mark address of the ExitJSFrame code.
void RecordJSReturn();
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 81514d1e9..6c1a9c643 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -424,12 +424,10 @@ void UnaryOpStub::Generate(MacroAssembler* masm) {
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(rcx); // Save return address.
- __ push(rax);
- // Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ Push(Smi::FromInt(MinorKey()));
+
+ __ push(rax); // the operand
__ Push(Smi::FromInt(op_));
+ __ Push(Smi::FromInt(mode_));
__ Push(Smi::FromInt(operand_type_));
__ push(rcx); // Push return address.
@@ -437,10 +435,7 @@ void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
- masm->isolate()),
- 4,
- 1);
+ ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index a7ed91c50..6a07b3b84 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -61,18 +61,11 @@ class TranscendentalCacheStub: public CodeStub {
class UnaryOpStub: public CodeStub {
public:
- UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
+ UnaryOpStub(Token::Value op,
+ UnaryOverwriteMode mode,
+ UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
- operand_type_(UnaryOpIC::UNINITIALIZED),
- name_(NULL) {
- }
-
- UnaryOpStub(
- int key,
- UnaryOpIC::TypeInfo operand_type)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
operand_type_(operand_type),
name_(NULL) {
}
@@ -90,8 +83,7 @@ class UnaryOpStub: public CodeStub {
#ifdef DEBUG
void Print() {
- PrintF("UnaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
+ PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index abac2b6b3..7eb08f7e0 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -316,7 +316,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
USE(height_in_bytes);
unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
+ unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
@@ -340,6 +340,9 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
+#ifdef DEBUG
+ output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@@ -448,12 +451,15 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// The 'fixed' part of the frame consists of the incoming parameters and
// the part described by JavaScriptFrameConstants.
unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
+ unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
+#ifdef DEBUG
+ output_frame->SetKind(Code::FUNCTION);
+#endif
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
@@ -584,7 +590,7 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
output_frame->SetState(Smi::FromInt(state));
// Set the continuation for the topmost frame.
- if (is_topmost) {
+ if (is_topmost && bailout_type_ != DEBUGGER) {
Code* continuation = (bailout_type_ == EAGER)
? isolate_->builtins()->builtin(Builtins::kNotifyDeoptimized)
: isolate_->builtins()->builtin(Builtins::kNotifyLazyDeoptimized);
@@ -596,6 +602,26 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
}
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ // Set the register values. The values are not important as there are no
+ // callee saved registers in JavaScript frames, so all registers are
+ // spilled. Registers rbp and rsp are set to the correct values though.
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ input_->SetRegister(i, i * 4);
+ }
+ input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
+ }
+}
+
+
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 4283f1be6..662992767 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -78,16 +78,18 @@ class JumpPatchSite BASE_EMBEDDED {
}
void EmitPatchInfo() {
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
- ASSERT(is_int8(delta_to_patch_site));
- __ testl(rax, Immediate(delta_to_patch_site));
+ if (patch_site_.is_bound()) {
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+ ASSERT(is_int8(delta_to_patch_site));
+ __ testl(rax, Immediate(delta_to_patch_site));
#ifdef DEBUG
- info_emitted_ = true;
+ info_emitted_ = true;
#endif
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
}
- bool is_bound() const { return patch_site_.is_bound(); }
-
private:
// jc will be patched with jz, jnc will become jnz.
void EmitJump(Condition cc, Label* target, Label::Distance near_jump) {
@@ -121,6 +123,7 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
+ scope_ = info->scope();
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -140,7 +143,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
// +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ movq(Operand(rsp, receiver_offset), kScratchRegister);
__ bind(&ok);
@@ -152,7 +155,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ push(rdi); // Callee's JS Function.
{ Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = scope()->num_stack_slots();
+ int locals_count = info->scope()->num_stack_slots();
if (locals_count == 1) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
} else if (locals_count > 1) {
@@ -166,7 +169,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
@@ -183,7 +186,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
// Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
+ int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
@@ -215,11 +218,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
// The receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
__ lea(rdx,
Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(rdx);
- __ Push(Smi::FromInt(scope()->num_parameters()));
+ __ Push(Smi::FromInt(num_parameters));
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
@@ -332,7 +336,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ movq(rsp, rbp);
__ pop(rbp);
- int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
+ int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
__ Ret(arguments_bytes, rcx);
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -749,7 +753,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
}
}
}
@@ -822,7 +826,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- EmitCallIC(ic, &patch_site, clause->CompareId());
+ __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ patch_site.EmitPatchInfo();
__ testq(rax, rax);
__ j(not_equal, &next_test);
@@ -1128,7 +1133,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- EmitCallIC(ic, mode, AstNode::kNoNumber);
+ __ call(ic, mode);
}
@@ -1208,7 +1213,7 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
__ Move(rax, key_literal->handle());
Handle<Code> ic =
isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ jmp(done);
}
}
@@ -1230,7 +1235,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) {
__ Move(rcx, var->name());
__ movq(rax, GlobalObjectOperand());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(rax);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@@ -1378,7 +1383,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ __ call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1607,14 +1612,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1636,7 +1641,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ movq(rax, rcx);
BinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
__ bind(&smi_case);
@@ -1683,8 +1689,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
OverwriteMode mode) {
__ pop(rdx);
BinaryOpStub stub(op, mode);
- // NULL signals no inlined smi code.
- EmitCallIC(stub.GetCode(), NULL, expr->id());
+ JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
context()->Plug(rax);
}
@@ -1724,7 +1731,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
break;
}
case KEYED_PROPERTY: {
@@ -1737,7 +1744,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
break;
}
}
@@ -1761,7 +1768,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@@ -1854,7 +1861,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1894,7 +1901,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1946,7 +1953,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
- EmitCallIC(ic, mode, expr->id());
+ __ call(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -1980,7 +1987,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
__ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2020,7 +2027,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
}
// Push the receiver of the enclosing function and do runtime call.
- __ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
+ __ push(Operand(rbp, (2 + info_->scope()->num_parameters()) * kPointerSize));
// Push the strict mode flag.
__ Push(Smi::FromInt(strict_mode_flag()));
@@ -2157,7 +2164,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
- // for a regular property use keyed EmitCallIC.
+ // for a regular property use EmitKeyedCallWithIC.
if (prop->is_synthetic()) {
// Do not visit the object and key subexpressions (they are shared
// by all occurrences of the same rewritten parameter).
@@ -2175,7 +2182,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
// Push result (function).
__ push(rax);
// Push Global receiver.
@@ -2562,7 +2569,7 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
// parameter count in rax.
VisitForAccumulatorValue(args->at(0));
__ movq(rdx, rax);
- __ Move(rax, Smi::FromInt(scope()->num_parameters()));
+ __ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(rax);
@@ -2574,7 +2581,7 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
Label exit;
// Get the number of formal parameters.
- __ Move(rax, Smi::FromInt(scope()->num_parameters()));
+ __ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
// Check if the calling frame is an arguments adaptor frame.
__ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
@@ -3507,6 +3514,39 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // Load the function into rax.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Prepare for the test.
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Test for strict mode function.
+ __ movq(rdx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, if_true);
+
+ // Test for native function.
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, if_true);
+
+ // Not native or strict-mode function.
+ __ jmp(if_false);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -3537,7 +3577,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
- EmitCallIC(ic, mode, expr->id());
+ __ call(ic, mode, expr->id());
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
} else {
@@ -3674,7 +3714,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register rax.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- EmitCallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(rax);
}
@@ -3795,7 +3835,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ movq(rdx, rax);
__ Move(rax, Smi::FromInt(1));
}
- EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ patch_site.EmitPatchInfo();
__ bind(&done);
// Store the value returned in rax.
@@ -3828,7 +3869,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3845,7 +3886,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3872,7 +3913,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(rax);
} else if (proxy != NULL &&
@@ -4067,7 +4108,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- EmitCallIC(ic, &patch_site, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ testq(rax, rax);
@@ -4126,59 +4168,6 @@ Register FullCodeGenerator::context_register() {
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- RelocInfo::Mode mode,
- unsigned ast_id) {
- ASSERT(mode == RelocInfo::CODE_TARGET ||
- mode == RelocInfo::CODE_TARGET_CONTEXT);
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1);
- default:
- break;
- }
- __ call(ic, mode, ast_id);
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- JumpPatchSite* patch_site,
- unsigned ast_id) {
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1);
- default:
- break;
- }
- __ call(ic, RelocInfo::CODE_TARGET, ast_id);
- if (patch_site != NULL && patch_site->is_bound()) {
- patch_site->EmitPatchInfo();
- } else {
- __ nop(); // Signals no inlined code.
- }
-}
-
-
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT(IsAligned(frame_offset, kPointerSize));
__ movq(Operand(rbp, frame_offset), value);
@@ -4191,19 +4180,20 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- if (scope()->is_global_scope()) {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope()) {
// Contexts nested in the global context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
__ Push(Smi::FromInt(0));
- } else if (scope()->is_eval_scope()) {
+ } else if (declaration_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
__ push(ContextOperand(rsi, Context::CLOSURE_INDEX));
} else {
- ASSERT(scope()->is_function_scope());
+ ASSERT(declaration_scope->is_function_scope());
__ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
}
@@ -4217,11 +4207,11 @@ void FullCodeGenerator::EnterFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Cook return address on top of stack (smi encoded Code* delta)
- __ movq(rdx, Operand(rsp, 0));
+ __ pop(rdx);
__ Move(rcx, masm_->CodeObject());
__ subq(rdx, rcx);
__ Integer32ToSmi(rdx, rdx);
- __ movq(Operand(rsp, 0), rdx);
+ __ push(rdx);
// Store result register while executing finally block.
__ push(result_register());
}
@@ -4230,16 +4220,13 @@ void FullCodeGenerator::EnterFinallyBlock() {
void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
- // Restore result register from stack.
__ pop(result_register());
// Uncook return address.
- __ movq(rdx, Operand(rsp, 0));
+ __ pop(rdx);
__ SmiToInteger32(rdx, rdx);
__ Move(rcx, masm_->CodeObject());
__ addq(rdx, rcx);
- __ movq(Operand(rsp, 0), rdx);
- // And return.
- __ ret(0);
+ __ jmp(rdx);
}
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 8919765cb..342f672e6 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -1266,6 +1266,8 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
__ movq(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
__ movq(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
__ cmpq(key, scratch);
__ j(greater_equal, slow_case);
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 92c889126..952174dce 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -1363,7 +1363,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Representation r = instr->hydrogen()->representation();
+ Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
Register reg = ToRegister(instr->InputAt(0));
__ testl(reg, reg);
@@ -1376,7 +1376,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->InputAt(0));
- HType type = instr->hydrogen()->type();
+ HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
EmitBranch(true_block, false_block, equal);
@@ -1483,32 +1483,6 @@ void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
}
-void LCodeGen::DoCmpID(LCmpID* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- LOperand* result = instr->result();
-
- Label unordered;
- if (instr->is_double()) {
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the unordered case, which produces a false value.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, &unordered, Label::kNear);
- } else {
- EmitCmpI(left, right);
- }
-
- Label done;
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
- __ j(cc, &done, Label::kNear);
-
- __ bind(&unordered);
- __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
@@ -1529,22 +1503,6 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
}
-void LCodeGen::DoCmpObjectEq(LCmpObjectEq* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- Label different, done;
- __ cmpq(left, right);
- __ j(not_equal, &different, Label::kNear);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done, Label::kNear);
- __ bind(&different);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
@@ -1556,19 +1514,6 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
-void LCodeGen::DoCmpConstantEq(LCmpConstantEq* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- Label done;
- __ cmpq(left, Immediate(instr->hydrogen()->right()));
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ j(equal, &done, Label::kNear);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1579,50 +1524,6 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
-void LCodeGen::DoIsNull(LIsNull* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- // If the expression is known to be a smi, then it's
- // definitely not null. Materialize false.
- // Consider adding other type and representation tests too.
- if (instr->hydrogen()->value()->type().IsSmi()) {
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- return;
- }
-
- __ CompareRoot(reg, Heap::kNullValueRootIndex);
- if (instr->is_strict()) {
- ASSERT(Heap::kTrueValueRootIndex >= 0);
- __ movl(result, Immediate(Heap::kTrueValueRootIndex));
- Label load;
- __ j(equal, &load, Label::kNear);
- __ Set(result, Heap::kFalseValueRootIndex);
- __ bind(&load);
- __ LoadRootIndexed(result, result, 0);
- } else {
- Label false_value, true_value, done;
- __ j(equal, &true_value, Label::kNear);
- __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ j(equal, &true_value, Label::kNear);
- __ JumpIfSmi(reg, &false_value, Label::kNear);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = result;
- __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &true_value, Label::kNear);
- __ bind(&false_value);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ bind(&done);
- }
-}
-
-
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
@@ -1685,25 +1586,6 @@ Condition LCodeGen::EmitIsObject(Register input,
}
-void LCodeGen::DoIsObject(LIsObject* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Label is_false, is_true, done;
-
- Condition true_cond = EmitIsObject(reg, &is_false, &is_true);
- __ j(true_cond, &is_true);
-
- __ bind(&is_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done);
-
- __ bind(&is_true);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
@@ -1718,22 +1600,6 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
}
-void LCodeGen::DoIsSmi(LIsSmi* instr) {
- LOperand* input_operand = instr->InputAt(0);
- Register result = ToRegister(instr->result());
- if (input_operand->IsRegister()) {
- Register input = ToRegister(input_operand);
- __ CheckSmiToIndicator(result, input);
- } else {
- Operand input = ToOperand(instr->InputAt(0));
- __ CheckSmiToIndicator(result, input);
- }
- // result is zero if input is a smi, and one otherwise.
- ASSERT(Heap::kFalseValueRootIndex == Heap::kTrueValueRootIndex + 1);
- __ LoadRootIndexed(result, result, Heap::kTrueValueRootIndex);
-}
-
-
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1750,25 +1616,6 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
}
-void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Label false_label, done;
- __ JumpIfSmi(input, &false_label);
- __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
- __ testb(FieldOperand(result, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &false_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(&false_label);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -1784,7 +1631,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
}
-static InstanceType TestType(HHasInstanceType* instr) {
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
@@ -1793,7 +1640,7 @@ static InstanceType TestType(HHasInstanceType* instr) {
}
-static Condition BranchCondition(HHasInstanceType* instr) {
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == to) return equal;
@@ -1804,25 +1651,6 @@ static Condition BranchCondition(HHasInstanceType* instr) {
}
-void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ testl(input, Immediate(kSmiTagMask));
- Label done, is_false;
- __ j(zero, &is_false);
- __ CmpObjectType(input, TestType(instr->hydrogen()), result);
- __ j(NegateCondition(BranchCondition(instr->hydrogen())),
- &is_false, Label::kNear);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done, Label::kNear);
- __ bind(&is_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
@@ -1852,21 +1680,6 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
}
-void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ testl(FieldOperand(input, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- Label done;
- __ j(zero, &done, Label::kNear);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
@@ -1935,29 +1748,6 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
}
-void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- ASSERT(input.is(result));
- Register temp = ToRegister(instr->TempAt(0));
- Handle<String> class_name = instr->hydrogen()->class_name();
- Label done;
- Label is_true, is_false;
-
- EmitClassOfTest(&is_true, &is_false, class_name, input, temp);
-
- __ j(not_equal, &is_false);
-
- __ bind(&is_true);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done, Label::kNear);
-
- __ bind(&is_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -4025,29 +3815,6 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
}
-void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Label true_label;
- Label false_label;
- Label done;
-
- Condition final_branch_condition = EmitTypeofIs(&true_label,
- &false_label,
- input,
- instr->type_literal());
- __ j(final_branch_condition, &true_label);
- __ bind(&false_label);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done, Label::kNear);
-
- __ bind(&true_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
@@ -4139,25 +3906,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
}
-void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
- Register result = ToRegister(instr->result());
- Label true_label;
- Label done;
-
- EmitIsConstructCall(result);
- __ j(equal, &true_label, Label::kNear);
-
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done, Label::kNear);
-
- __ bind(&true_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp = ToRegister(instr->TempAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 42e60c3d9..58c452102 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -267,12 +267,6 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
}
-void LTypeofIs::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
- stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
-}
-
-
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
InputAt(0)->PrintTo(stream);
@@ -342,13 +336,6 @@ void LCallNew::PrintDataTo(StringStream* stream) {
}
-void LClassOfTest::PrintDataTo(StringStream* stream) {
- stream->Add("= class_of_test(");
- InputAt(0)->PrintTo(stream);
- stream->Add(", \"%o\")", *hydrogen()->class_name());
-}
-
-
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
@@ -985,18 +972,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (current->IsTest() && !instr->IsGoto()) {
- ASSERT(instr->IsControl());
- HTest* test = HTest::cast(current);
- instr->set_hydrogen_value(test->value());
- HBasicBlock* first = test->FirstSuccessor();
- HBasicBlock* second = test->SecondSuccessor();
- ASSERT(first != NULL && second != NULL);
- instr->SetBranchTargets(first->block_id(), second->block_id());
- } else {
- instr->set_hydrogen_value(current);
- }
-
+ instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -1041,81 +1017,17 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
}
-LInstruction* LChunkBuilder::DoTest(HTest* instr) {
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* v = instr->value();
- if (!v->EmitAtUses()) return new LBranch(UseRegisterAtStart(v));
- ASSERT(!v->HasSideEffects());
- if (v->IsClassOfTest()) {
- HClassOfTest* compare = HClassOfTest::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
- TempRegister());
- } else if (v->IsCompare()) {
- HCompare* compare = HCompare::cast(v);
- HValue* left = compare->left();
- HValue* right = compare->right();
- Representation r = compare->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseOrConstantAtStart(right));
- } else {
- ASSERT(r.IsDouble());
- ASSERT(left->representation().IsDouble());
- ASSERT(right->representation().IsDouble());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- }
- } else if (v->IsIsSmi()) {
- HIsSmi* compare = HIsSmi::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsSmiAndBranch(Use(compare->value()));
- } else if (v->IsIsUndetectable()) {
- HIsUndetectable* compare = HIsUndetectable::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
- TempRegister());
- } else if (v->IsHasInstanceType()) {
- HHasInstanceType* compare = HHasInstanceType::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsHasCachedArrayIndex()) {
- HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsIsNull()) {
- HIsNull* compare = HIsNull::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- // We only need a temp register for non-strict compare.
- LOperand* temp = compare->is_strict() ? NULL : TempRegister();
- return new LIsNullAndBranch(UseRegisterAtStart(compare->value()), temp);
- } else if (v->IsIsObject()) {
- HIsObject* compare = HIsObject::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsCompareObjectEq()) {
- HCompareObjectEq* compare = HCompareObjectEq::cast(v);
- return new LCmpObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsCompareConstantEq()) {
- HCompareConstantEq* compare = HCompareConstantEq::cast(v);
- return new LCmpConstantEqAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsTypeofIs()) {
- HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
- } else if (v->IsIsConstructCall()) {
- return new LIsConstructCallAndBranch(TempRegister());
- } else if (v->IsConstant()) {
+ if (v->EmitAtUses()) {
+ ASSERT(v->IsConstant());
+ ASSERT(!v->representation().IsDouble());
HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
- } else {
- Abort("Undefined compare before branch");
- return NULL;
}
+ return new LBranch(UseRegisterAtStart(v));
}
@@ -1468,85 +1380,83 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
}
-LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
Token::Value op = instr->token();
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ bool reversed = (op == Token::GT || op == Token::LTE);
+ LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
+ LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
+ LCmpT* result = new LCmpT(left, right);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareIDAndBranch(
+ HCompareIDAndBranch* instr) {
Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else if (r.IsDouble()) {
+ return new LCmpIDAndBranch(left, right);
+ } else {
+ ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- bool reversed = (op == Token::GT || op == Token::LTE);
- LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
- LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
- LCmpT* result = new LCmpT(left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
+ return new LCmpIDAndBranch(left, right);
}
}
-LInstruction* LChunkBuilder::DoCompareObjectEq(HCompareObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LCmpObjectEq* result = new LCmpObjectEq(left, right);
- return DefineAsRegister(result);
+ return new LCmpObjectEqAndBranch(left, right);
}
-LInstruction* LChunkBuilder::DoCompareConstantEq(
- HCompareConstantEq* instr) {
- LOperand* left = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LCmpConstantEq(left));
+LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
+ HCompareConstantEqAndBranch* instr) {
+ return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
+LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsNull(value));
+ LOperand* temp = instr->is_strict() ? NULL : TempRegister();
+ return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
}
-LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
-
- return DefineAsRegister(new LIsObject(value));
+ return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseAtStart(instr->value());
-
- return DefineAsRegister(new LIsSmi(value));
+ return new LIsSmiAndBranch(Use(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsUndetectable(value));
+ return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+ TempRegister());
}
-LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LHasInstanceType(value));
+ return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
}
@@ -1559,17 +1469,17 @@ LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
}
-LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
- HHasCachedArrayIndex* instr) {
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
- return DefineAsRegister(new LHasCachedArrayIndex(value));
+ return new LHasCachedArrayIndexAndBranch(UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
- Abort("Unimplemented: %s", "DoClassOfTest");
- return NULL;
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
+ return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+ TempRegister());
}
@@ -2152,13 +2062,14 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
}
-LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
- return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
- return DefineAsRegister(new LIsConstructCall);
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new LIsConstructCallAndBranch(TempRegister());
}
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index d500dfd87..af0b29991 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -77,13 +77,9 @@ class LCodeGen;
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
- V(ClassOfTest) \
V(ClassOfTestAndBranch) \
- V(CmpConstantEq) \
V(CmpConstantEqAndBranch) \
- V(CmpID) \
V(CmpIDAndBranch) \
- V(CmpObjectEq) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
@@ -103,9 +99,7 @@ class LCodeGen;
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
- V(HasCachedArrayIndex) \
V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
@@ -113,15 +107,10 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCall) \
V(IsConstructCallAndBranch) \
- V(IsNull) \
V(IsNullAndBranch) \
- V(IsObject) \
V(IsObjectAndBranch) \
- V(IsSmi) \
V(IsSmiAndBranch) \
- V(IsUndetectable) \
V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
@@ -173,7 +162,6 @@ class LCodeGen;
V(Throw) \
V(ToFastProperties) \
V(Typeof) \
- V(TypeofIs) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
@@ -233,7 +221,6 @@ class LInstruction: public ZoneObject {
virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
- virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
void set_environment(LEnvironment* env) { environment_ = env; }
LEnvironment* environment() const { return environment_; }
@@ -457,16 +444,15 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
virtual bool IsControl() const { return true; }
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
- void SetBranchTargets(int true_block_id, int false_block_id) {
- true_block_id_ = true_block_id;
- false_block_id_ = false_block_id;
- }
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+ int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
+ int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
private:
- int true_block_id_;
- int false_block_id_;
+ HControlInstruction* hydrogen() {
+ return HControlInstruction::cast(this->hydrogen_value());
+ }
};
@@ -565,23 +551,6 @@ class LMulI: public LTemplateInstruction<1, 2, 0> {
};
-class LCmpID: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpID(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
- }
-};
-
-
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -590,7 +559,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
@@ -615,17 +584,6 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
};
-class LCmpObjectEq: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpObjectEq(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEq, "cmp-object-eq")
-};
-
-
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -638,17 +596,6 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
};
-class LCmpConstantEq: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCmpConstantEq(LOperand* left) {
- inputs_[0] = left;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEq, "cmp-constant-eq")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
-};
-
-
class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
public:
explicit LCmpConstantEqAndBranch(LOperand* left) {
@@ -657,20 +604,7 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
"cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
-};
-
-
-class LIsNull: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsNull(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
-
- bool is_strict() const { return hydrogen()->is_strict(); }
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
};
@@ -682,7 +616,7 @@ class LIsNullAndBranch: public LControlInstruction<1, 1> {
}
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
+ DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
bool is_strict() const { return hydrogen()->is_strict(); }
@@ -690,16 +624,6 @@ class LIsNullAndBranch: public LControlInstruction<1, 1> {
};
-class LIsObject: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsObject(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
-};
-
-
class LIsObjectAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsObjectAndBranch(LOperand* value) {
@@ -707,22 +631,12 @@ class LIsObjectAndBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LIsSmi: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
- DECLARE_HYDROGEN_ACCESSOR(IsSmi)
-};
-
-
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
@@ -730,22 +644,12 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsUndetectable(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
-};
-
-
class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
@@ -755,22 +659,12 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
-};
-
-
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
@@ -779,7 +673,7 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -796,17 +690,6 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
-};
-
-
class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -815,19 +698,7 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTest: public LTemplateInstruction<1, 1, 1> {
- public:
- LClassOfTest(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+ DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -842,7 +713,7 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -856,7 +727,7 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1002,7 +873,7 @@ class LBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Value)
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -1972,21 +1843,6 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeofIs(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
@@ -1994,7 +1850,7 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
@@ -2002,13 +1858,6 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
};
-class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
- DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
-};
-
-
class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
@@ -2017,6 +1866,7 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
"is-construct-call-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsConstructCallAndBranch)
};
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 7c8a3667e..dbed6e0fd 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -3628,17 +3628,14 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
movq(dst, rsi);
}
- // We should not have found a with or catch context by walking the context
- // chain (i.e., the static scope chain and runtime context chain do not
- // agree). A variable occurring in such a scope should have slot type
- // LOOKUP and not CONTEXT.
+ // We should not have found a with context by walking the context
+ // chain (i.e., the static scope chain and runtime context chain do
+ // not agree). A variable occurring in such a scope should have
+ // slot type LOOKUP and not CONTEXT.
if (emit_debug_code()) {
CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
Heap::kWithContextMapRootIndex);
Check(not_equal, "Variable resolved to with context.");
- CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
- Heap::kCatchContextMapRootIndex);
- Check(not_equal, "Variable resolved to catch context.");
}
}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 097743dd1..1531f905d 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -14659,3 +14659,28 @@ THREADED_TEST(ReadOnlyIndexedProperties) {
obj->Set(v8_str("2000000000"), v8_str("foobar"));
CHECK_EQ(v8_str("DONT_CHANGE"), obj->Get(v8_str("2000000000")));
}
+
+
+THREADED_TEST(Regress1516) {
+ v8::HandleScope scope;
+
+ LocalContext context;
+ { v8::HandleScope temp_scope;
+ CompileRun("({'a': 0})");
+ }
+
+ int elements;
+ { i::MapCache* map_cache =
+ i::MapCache::cast(i::Isolate::Current()->context()->map_cache());
+ elements = map_cache->NumberOfElements();
+ CHECK_LE(1, elements);
+ }
+
+ i::Isolate::Current()->heap()->CollectAllGarbage(true);
+ { i::Object* raw_map_cache = i::Isolate::Current()->context()->map_cache();
+ if (raw_map_cache != i::Isolate::Current()->heap()->undefined_value()) {
+ i::MapCache* map_cache = i::MapCache::cast(raw_map_cache);
+ CHECK_GT(elements, map_cache->NumberOfElements());
+ }
+ }
+}
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 4c5f197bc..72907b6e9 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -375,10 +375,15 @@ static void CheckCodeForUnsafeLiteral(Handle<JSFunction> f) {
v8::internal::EmbeddedVector<char, 128> decode_buffer;
while (pc < end) {
- pc += d.InstructionDecode(decode_buffer, pc);
- CHECK(strstr(decode_buffer.start(), "mov eax,0x178c29c") == NULL);
- CHECK(strstr(decode_buffer.start(), "push 0x178c29c") == NULL);
- CHECK(strstr(decode_buffer.start(), "0x178c29c") == NULL);
+ int num_const = d.ConstantPoolSizeAt(pc);
+ if (num_const >= 0) {
+ pc += num_const * kPointerSize;
+ } else {
+ pc += d.InstructionDecode(decode_buffer, pc);
+ CHECK(strstr(decode_buffer.start(), "mov eax,0x178c29c") == NULL);
+ CHECK(strstr(decode_buffer.start(), "push 0x178c29c") == NULL);
+ CHECK(strstr(decode_buffer.start(), "0x178c29c") == NULL);
+ }
}
}
}
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index e7a178cc5..10a90bcf1 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -251,7 +251,7 @@ static void CheckThatProfilerWorks(LogBufferMatcher* matcher) {
!LoggerTestHelper::IsSamplerActive());
LoggerTestHelper::ResetSamplesTaken();
- LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 0);
+ LOGGER->ResumeProfiler();
CHECK(LoggerTestHelper::IsSamplerActive());
// Verify that the current map of compiled functions has been logged.
@@ -273,7 +273,7 @@ static void CheckThatProfilerWorks(LogBufferMatcher* matcher) {
i::OS::Sleep(1);
}
- LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 0);
+ LOGGER->PauseProfiler();
CHECK(i::RuntimeProfiler::IsEnabled() ||
!LoggerTestHelper::IsSamplerActive());
@@ -614,99 +614,13 @@ TEST(LogAccessorCallbacks) {
}
-TEST(LogTags) {
- ScopedLoggerInitializer initialize_logger(false);
- LogBufferMatcher matcher;
-
- const char* open_tag = "open-tag,";
- const char* close_tag = "close-tag,";
-
- // Check compatibility with the old style behavior.
- CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
- LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 0);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
- LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 0);
- CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
- CHECK_EQ(NULL, matcher.Find(open_tag));
- CHECK_EQ(NULL, matcher.Find(close_tag));
-
- const char* open_tag1 = "open-tag,1\n";
- const char* close_tag1 = "close-tag,1\n";
-
- // Check non-nested tag case.
- CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
- LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
- LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
- CHECK_GT(matcher.GetNextChunk(), 0);
- CHECK(matcher.IsInSequence(open_tag1, close_tag1));
-
- const char* open_tag2 = "open-tag,2\n";
- const char* close_tag2 = "close-tag,2\n";
-
- // Check nested tags case.
- CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
- LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
- LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 2);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
- LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 2);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
- LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
- CHECK_GT(matcher.GetNextChunk(), 0);
- // open_tag1 < open_tag2 < close_tag2 < close_tag1
- CHECK(matcher.IsInSequence(open_tag1, open_tag2));
- CHECK(matcher.IsInSequence(open_tag2, close_tag2));
- CHECK(matcher.IsInSequence(close_tag2, close_tag1));
-
- // Check overlapped tags case.
- CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
- LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
- LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 2);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
- LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
- LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 2);
- CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
- CHECK_GT(matcher.GetNextChunk(), 0);
- // open_tag1 < open_tag2 < close_tag1 < close_tag2
- CHECK(matcher.IsInSequence(open_tag1, open_tag2));
- CHECK(matcher.IsInSequence(open_tag2, close_tag1));
- CHECK(matcher.IsInSequence(close_tag1, close_tag2));
-
- const char* open_tag3 = "open-tag,3\n";
- const char* close_tag3 = "close-tag,3\n";
-
- // Check pausing overflow case.
- CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
- LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
- LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 2);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
- LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 2);
- CHECK_EQ(v8::PROFILER_MODULE_CPU, LOGGER->GetActiveProfilerModules());
- LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
- CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
- LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 3);
- CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
- LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 3);
- CHECK_EQ(v8::PROFILER_MODULE_NONE, LOGGER->GetActiveProfilerModules());
- // Must be no tags, because logging must be disabled.
- CHECK_EQ(NULL, matcher.Find(open_tag3));
- CHECK_EQ(NULL, matcher.Find(close_tag3));
-}
-
-
TEST(IsLoggingPreserved) {
ScopedLoggerInitializer initialize_logger(false);
CHECK(LOGGER->is_logging());
- LOGGER->ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
+ LOGGER->ResumeProfiler();
CHECK(LOGGER->is_logging());
- LOGGER->PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
+ LOGGER->PauseProfiler();
CHECK(LOGGER->is_logging());
}
diff --git a/deps/v8/test/mjsunit/assert-opt-and-deopt.js b/deps/v8/test/mjsunit/assert-opt-and-deopt.js
index b624ba560..f58986865 100644
--- a/deps/v8/test/mjsunit/assert-opt-and-deopt.js
+++ b/deps/v8/test/mjsunit/assert-opt-and-deopt.js
@@ -148,7 +148,7 @@ tracker.AssertIsOptimized(f, false);
tracker.AssertDeoptHappened(f, false);
tracker.AssertDeoptCount(f, 0);
-for (var i = 0; i < 5; i++) f(1);
+for (var i = 0; i < 2; i++) f(1);
tracker.AssertOptCount(f, 0);
tracker.AssertIsOptimized(f, false);
diff --git a/deps/v8/test/mjsunit/date-parse.js b/deps/v8/test/mjsunit/date-parse.js
index 23a69934d..a1eef663b 100644
--- a/deps/v8/test/mjsunit/date-parse.js
+++ b/deps/v8/test/mjsunit/date-parse.js
@@ -285,9 +285,9 @@ for (var i = 0; i < 24 * 365 * 100; i += 150) {
// Negative tests.
var testCasesNegative = [
- 'May 25 2008 1:30 (PM)) UTC',
- 'May 25 2008 1:30( )AM (PM)',
- 'May 25 2008 AAA (GMT)'];
+ 'May 25 2008 1:30 (PM)) UTC', // Bad unmatched ')' after number.
+ 'May 25 2008 1:30( )AM (PM)', //
+ 'May 25 2008 AAA (GMT)']; // Unknown word after number.
testCasesNegative.forEach(function (s) {
assertTrue(isNaN(Date.parse(s)), s + " is not NaN.");
diff --git a/deps/v8/test/mjsunit/date.js b/deps/v8/test/mjsunit/date.js
index f13af8266..a7f6cfa7d 100644
--- a/deps/v8/test/mjsunit/date.js
+++ b/deps/v8/test/mjsunit/date.js
@@ -187,3 +187,123 @@ d = new Date(1969, 12, 1, Infinity);
assertTrue(isNaN(d.getTime()));
d = new Date(1969, 12, 1, -Infinity);
assertTrue(isNaN(d.getTime()));
+
+// Parsing ES5 ISO-8601 dates.
+// When TZ is omitted, it defaults to 'Z' meaning UTC.
+
+// Check epoch.
+assertEquals(0, Date.parse("1970-01-01T00:00:00.000+00:00"));
+assertEquals(0, Date.parse("1970-01-01T00:00:00.000-00:00"));
+assertEquals(0, Date.parse("1970-01-01T00:00:00.000Z"));
+assertEquals(0, Date.parse("1970-01-01T00:00:00.000"));
+assertEquals(0, Date.parse("1970-01-01T00:00:00"));
+assertEquals(0, Date.parse("1970-01-01T00:00"));
+assertEquals(0, Date.parse("1970-01-01"));
+
+assertEquals(0, Date.parse("1970-01T00:00:00.000+00:00"));
+assertEquals(0, Date.parse("1970-01T00:00:00.000-00:00"));
+assertEquals(0, Date.parse("1970-01T00:00:00.000Z"));
+assertEquals(0, Date.parse("1970-01T00:00:00.000"));
+assertEquals(0, Date.parse("1970-01T00:00:00"));
+assertEquals(0, Date.parse("1970-01T00:00"));
+assertEquals(0, Date.parse("1970-01"));
+
+assertEquals(0, Date.parse("1970T00:00:00.000+00:00"));
+assertEquals(0, Date.parse("1970T00:00:00.000-00:00"));
+assertEquals(0, Date.parse("1970T00:00:00.000Z"));
+assertEquals(0, Date.parse("1970T00:00:00.000"));
+assertEquals(0, Date.parse("1970T00:00:00"));
+assertEquals(0, Date.parse("1970T00:00"));
+assertEquals(0, Date.parse("1970"));
+
+assertEquals(0, Date.parse("+001970-01-01T00:00:00.000+00:00"));
+assertEquals(0, Date.parse("+001970-01-01T00:00:00.000-00:00"));
+assertEquals(0, Date.parse("+001970-01-01T00:00:00.000Z"));
+assertEquals(0, Date.parse("+001970-01-01T00:00:00.000"));
+assertEquals(0, Date.parse("+001970-01-01T00:00:00"));
+assertEquals(0, Date.parse("+001970-01-01T00:00"));
+assertEquals(0, Date.parse("+001970-01-01"));
+
+assertEquals(0, Date.parse("+001970-01T00:00:00.000+00:00"));
+assertEquals(0, Date.parse("+001970-01T00:00:00.000-00:00"));
+assertEquals(0, Date.parse("+001970-01T00:00:00.000Z"));
+assertEquals(0, Date.parse("+001970-01T00:00:00.000"));
+assertEquals(0, Date.parse("+001970-01T00:00:00"));
+assertEquals(0, Date.parse("+001970-01T00:00"));
+assertEquals(0, Date.parse("+001970-01"));
+
+assertEquals(0, Date.parse("+001970T00:00:00.000+00:00"));
+assertEquals(0, Date.parse("+001970T00:00:00.000-00:00"));
+assertEquals(0, Date.parse("+001970T00:00:00.000Z"));
+assertEquals(0, Date.parse("+001970T00:00:00.000"));
+assertEquals(0, Date.parse("+001970T00:00:00"));
+assertEquals(0, Date.parse("+001970T00:00"));
+assertEquals(0, Date.parse("+001970"));
+
+// Check random date.
+assertEquals(70671003500, Date.parse("1972-03-28T23:50:03.500+01:00"));
+assertEquals(70674603500, Date.parse("1972-03-28T23:50:03.500Z"));
+assertEquals(70674603500, Date.parse("1972-03-28T23:50:03.500"));
+assertEquals(70674603000, Date.parse("1972-03-28T23:50:03"));
+assertEquals(70674600000, Date.parse("1972-03-28T23:50"));
+assertEquals(70588800000, Date.parse("1972-03-28"));
+
+assertEquals(68338203500, Date.parse("1972-03T23:50:03.500+01:00"));
+assertEquals(68341803500, Date.parse("1972-03T23:50:03.500Z"));
+assertEquals(68341803500, Date.parse("1972-03T23:50:03.500"));
+assertEquals(68341803000, Date.parse("1972-03T23:50:03"));
+assertEquals(68341800000, Date.parse("1972-03T23:50"));
+assertEquals(68256000000, Date.parse("1972-03"));
+
+assertEquals(63154203500, Date.parse("1972T23:50:03.500+01:00"));
+assertEquals(63157803500, Date.parse("1972T23:50:03.500Z"));
+assertEquals(63157803500, Date.parse("1972T23:50:03.500"));
+assertEquals(63157803000, Date.parse("1972T23:50:03"));
+assertEquals(63072000000, Date.parse("1972"));
+
+assertEquals(70671003500, Date.parse("+001972-03-28T23:50:03.500+01:00"));
+assertEquals(70674603500, Date.parse("+001972-03-28T23:50:03.500Z"));
+assertEquals(70674603500, Date.parse("+001972-03-28T23:50:03.500"));
+assertEquals(70674603000, Date.parse("+001972-03-28T23:50:03"));
+assertEquals(70674600000, Date.parse("+001972-03-28T23:50"));
+assertEquals(70588800000, Date.parse("+001972-03-28"));
+
+assertEquals(68338203500, Date.parse("+001972-03T23:50:03.500+01:00"));
+assertEquals(68341803500, Date.parse("+001972-03T23:50:03.500Z"));
+assertEquals(68341803500, Date.parse("+001972-03T23:50:03.500"));
+assertEquals(68341803000, Date.parse("+001972-03T23:50:03"));
+assertEquals(68341800000, Date.parse("+001972-03T23:50"));
+assertEquals(68256000000, Date.parse("+001972-03"));
+
+assertEquals(63154203500, Date.parse("+001972T23:50:03.500+01:00"));
+assertEquals(63157803500, Date.parse("+001972T23:50:03.500Z"));
+assertEquals(63157803500, Date.parse("+001972T23:50:03.500"));
+assertEquals(63157803000, Date.parse("+001972T23:50:03"));
+assertEquals(63072000000, Date.parse("+001972"));
+
+
+// Ensure that ISO-years in the range 00-99 aren't translated to the range
+// 1950..2049.
+assertEquals(-60904915200000, Date.parse("0040-01-01"));
+assertEquals(-60273763200000, Date.parse("0060-01-01"));
+assertEquals(-62167219200000, Date.parse("0000-01-01"));
+assertEquals(-62167219200000, Date.parse("+000000-01-01"));
+
+// Test negative years.
+assertEquals(-63429523200000, Date.parse("-000040-01-01"));
+assertEquals(-64060675200000, Date.parse("-000060-01-01"));
+assertEquals(-124397510400000, Date.parse("-001972-01-01"));
+
+// Check time-zones.
+assertEquals(70674603500, Date.parse("1972-03-28T23:50:03.500Z"));
+for (var i = 0; i < 24; i++) {
+ var hh = (i < 10) ? "0" + i : "" + i;
+ for (var j = 0; j < 60; j += 15) {
+ var mm = (j < 10) ? "0" + j : "" + j;
+ var ms = (i * 60 + j) * 60000;
+ var string = "1972-03-28T23:50:03.500-" + hh + ":" + mm;
+ assertEquals(70674603500 + ms, Date.parse(string), string);
+ string = "1972-03-28T23:50:03.500+" + hh + ":" + mm;
+ assertEquals(70674603500 - ms, Date.parse(string), string);
+ }
+}
diff --git a/deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js b/deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js
new file mode 100644
index 000000000..10dfbabf0
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js
@@ -0,0 +1,132 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+listenerComplete = false;
+exception = false;
+
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break)
+ {
+ assertEquals(6, exec_state.frameCount());
+
+ for (var i = 0; i < exec_state.frameCount(); i++) {
+ var frame = exec_state.frame(i);
+ // All frames except the bottom one has normal variables a and b.
+ if (i < exec_state.frameCount() - 1) {
+ assertEquals('a', frame.localName(0));
+ assertEquals('b', frame.localName(1));
+ assertEquals(i * 2 + 1 + (i * 2 + 1) / 100,
+ frame.localValue(0).value());
+ assertEquals(i * 2 + 2 + (i * 2 + 2) / 100,
+ frame.localValue(1).value());
+ }
+
+ // When function f is optimized (2 means YES, see runtime.cc) we
+ // expect an optimized frame for f with g1, g2 and g3 inlined.
+ if (%GetOptimizationStatus(f) == 2) {
+ if (i == 1 || i == 2 || i == 3) {
+ assertTrue(frame.isOptimizedFrame());
+ assertTrue(frame.isInlinedFrame());
+ } else if (i == 4) {
+ assertTrue(frame.isOptimizedFrame());
+ assertFalse(frame.isInlinedFrame());
+ } else {
+ assertFalse(frame.isOptimizedFrame());
+ assertFalse(frame.isInlinedFrame());
+ }
+ }
+ }
+
+ // Indicate that all was processed.
+ listenerComplete = true;
+ }
+ } catch (e) {
+ exception = e
+ };
+};
+
+f();f();f();
+%OptimizeFunctionOnNextCall(f);
+f();
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function h(x, y) {
+ var a = 1;
+ var b = 2;
+ a = a + a / 100;
+ b = b + b / 100;
+ debugger; // Breakpoint.
+};
+
+function g3(x, y) {
+ var a = 3;
+ var b = 4;
+ a = a + a / 100;
+ b = b + b / 100;
+ h(a, b);
+ return a+b;
+};
+
+function g2(x, y) {
+ var a = 5;
+ var b = 6;
+ a = a + a / 100;
+ b = b + b / 100;
+ g3(a, b);
+};
+
+function g1(x, y) {
+ var a = 7;
+ var b = 8;
+ a = a + a / 100;
+ b = b + b / 100;
+ g2(a, b);
+};
+
+function f(x, y) {
+ var a = 9;
+ var b = 10;
+ a = a + a / 100;
+ b = b + b / 100;
+ g1(a, b);
+};
+
+f(11.11, 12.12);
+
+// Make sure that the debug event listener vas invoked.
+assertFalse(exception, "exception in listener " + exception)
+assertTrue(listenerComplete);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js b/deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js
new file mode 100644
index 000000000..cdff0b718
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js
@@ -0,0 +1,119 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+listenerComplete = false;
+exception = false;
+
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break)
+ {
+ assertEquals(6, exec_state.frameCount());
+
+ for (var i = 0; i < exec_state.frameCount(); i++) {
+ var frame = exec_state.frame(i);
+    // All frames except the bottom one have normal variables a and b.
+ if (i < exec_state.frameCount() - 1) {
+ assertEquals('a', frame.localName(0));
+ assertEquals('b', frame.localName(1));
+ assertEquals(i * 2 + 1, frame.localValue(0).value());
+ assertEquals(i * 2 + 2, frame.localValue(1).value());
+ }
+
+ // When function f is optimized (2 means YES, see runtime.cc) we
+ // expect an optimized frame for f with g1, g2 and g3 inlined.
+ if (%GetOptimizationStatus(f) == 2) {
+ if (i == 1 || i == 2 || i == 3) {
+ assertTrue(frame.isOptimizedFrame());
+ assertTrue(frame.isInlinedFrame());
+ } else if (i == 4) {
+ assertTrue(frame.isOptimizedFrame());
+ assertFalse(frame.isInlinedFrame());
+ } else {
+ assertFalse(frame.isOptimizedFrame());
+ assertFalse(frame.isInlinedFrame());
+ }
+ }
+ }
+
+ // Indicate that all was processed.
+ listenerComplete = true;
+ }
+ } catch (e) {
+ exception = e
+ };
+};
+
+f();f();f();
+%OptimizeFunctionOnNextCall(f);
+f();
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function h(x, y) {
+ var a = 1;
+ var b = 2;
+ debugger; // Breakpoint.
+};
+
+function g3(x, y) {
+ var a = 3;
+ var b = 4;
+ h(a, b);
+};
+
+function g2(x, y) {
+ var a = 5;
+ var b = 6;
+ g3(a, b);
+};
+
+function g1(x, y) {
+ var a = 7;
+ var b = 8;
+ g2(a, b);
+};
+
+function f(x, y) {
+ var a = 9;
+ var b = 10;
+ g1(a, b);
+};
+
+f(11, 12);
+
+// Make sure that the debug event listener was invoked.
+assertFalse(exception, "exception in listener " + exception)
+assertTrue(listenerComplete);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/element-kind.js b/deps/v8/test/mjsunit/element-kind.js
new file mode 100644
index 000000000..48a029f27
--- /dev/null
+++ b/deps/v8/test/mjsunit/element-kind.js
@@ -0,0 +1,102 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+// Test element kind of objects
+
+var element_kind = {
+ fast_elements : 1,
+ fast_double_elements : 2,
+ dictionary_elements : 3,
+ external_byte_elements : 4,
+ external_unsigned_byte_elements : 5,
+ external_short_elements : 6,
+ external_unsigned_short_elements : 7,
+ external_int_elements : 8,
+ external_unsigned_int_elements : 9,
+ external_float_elements : 10,
+ external_double_elements : 11,
+ external_pixel_elements : 12
+}
+
+// We expect an object to only be of one element kind.
+function assertKind(expected, obj){
+ assertEquals(expected == element_kind.fast_elements,
+ %HasFastElements(obj));
+ assertEquals(expected == element_kind.fast_double_elements,
+ %HasFastDoubleElements(obj));
+ assertEquals(expected == element_kind.dictionary_elements,
+ %HasDictionaryElements(obj));
+ assertEquals(expected == element_kind.external_byte_elements,
+ %HasExternalByteElements(obj));
+ assertEquals(expected == element_kind.external_unsigned_byte_elements,
+ %HasExternalUnsignedByteElements(obj));
+ assertEquals(expected == element_kind.external_short_elements,
+ %HasExternalShortElements(obj));
+ assertEquals(expected == element_kind.external_unsigned_short_elements,
+ %HasExternalUnsignedShortElements(obj));
+ assertEquals(expected == element_kind.external_int_elements,
+ %HasExternalIntElements(obj));
+ assertEquals(expected == element_kind.external_unsigned_int_elements,
+ %HasExternalUnsignedIntElements(obj));
+ assertEquals(expected == element_kind.external_float_elements,
+ %HasExternalFloatElements(obj));
+ assertEquals(expected == element_kind.external_double_elements,
+ %HasExternalDoubleElements(obj));
+ assertEquals(expected == element_kind.external_pixel_elements,
+ %HasExternalPixelElements(obj));
+ // every external kind is also an external array
+ assertEquals(expected >= element_kind.external_byte_elements,
+ %HasExternalArrayElements(obj));
+}
+
+var me = {};
+assertKind(element_kind.fast_elements, me);
+me.dance = 0xD15C0;
+me.drink = 0xC0C0A;
+assertKind(element_kind.fast_elements, me);
+
+var you = new Array();
+for(i = 0; i < 1337; i++) {
+ you[i] = i;
+}
+assertKind(element_kind.fast_elements, you);
+
+assertKind(element_kind.dictionary_elements, new Array(0xC0C0A));
+
+// fast_double_elements not yet available
+
+
+assertKind(element_kind.external_byte_elements, new Int8Array(9001));
+assertKind(element_kind.external_unsigned_byte_elements, new Uint8Array(007));
+assertKind(element_kind.external_short_elements, new Int16Array(666));
+assertKind(element_kind.external_unsigned_short_elements, new Uint16Array(42));
+assertKind(element_kind.external_int_elements, new Int32Array(0xF));
+assertKind(element_kind.external_unsigned_int_elements, new Uint32Array(23));
+assertKind(element_kind.external_float_elements, new Float32Array(7));
+assertKind(element_kind.external_double_elements, new Float64Array(0));
+assertKind(element_kind.external_pixel_elements, new PixelArray(512));
diff --git a/deps/v8/test/mjsunit/fuzz-natives.js b/deps/v8/test/mjsunit/fuzz-natives.js
index 719dc5821..ffa92684d 100644
--- a/deps/v8/test/mjsunit/fuzz-natives.js
+++ b/deps/v8/test/mjsunit/fuzz-natives.js
@@ -167,7 +167,8 @@ var knownProblems = {
"_SwapElements": true,
- // Performance critical function which cannot afford type checks.
+ // Performance critical functions which cannot afford type checks.
+ "_IsNativeOrStrictMode": true,
"_CallFunction": true,
// Tries to allocate based on argument, and (correctly) throws
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 8bd0e82e0..8ffcafce6 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -1,4 +1,4 @@
-# Copyright 2008 the V8 project authors. All rights reserved.
+# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@@ -31,6 +31,10 @@ prefix mjsunit
bugs: FAIL
##############################################################################
+# Fails.
+regress/regress-1119: FAIL
+
+##############################################################################
# Too slow in debug mode with --stress-opt
compiler/regress-stacktrace-methods: PASS, SKIP if $mode == debug
compiler/regress-funcaller: PASS, SKIP if $mode == debug
diff --git a/deps/v8/test/mjsunit/regress/regress-1360.js b/deps/v8/test/mjsunit/regress/regress-1360.js
new file mode 100644
index 000000000..bebad28bd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1360.js
@@ -0,0 +1,39 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check the receiver for the sort and replace functions to
+// Array.prototype.sort and String.prototype.replace.
+
+var global = this;
+function strict() { "use strict"; assertEquals(void 0, this); }
+function non_strict() { assertEquals(global, this); }
+
+[1,2,3].sort(strict);
+[1,2,3].sort(non_strict);
+
+"axc".replace("x", strict);
+"axc".replace("x", non_strict);
diff --git a/deps/v8/test/mjsunit/regress/regress-1513.js b/deps/v8/test/mjsunit/regress/regress-1513.js
new file mode 100644
index 000000000..06c5edf10
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1513.js
@@ -0,0 +1,44 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Deleting a mapped arguments property and adding it via
+// Object.defineProperty should not crash.
+
+function testcase() {
+ return (function (a, b, c) {
+ delete arguments[0];
+ Object.defineProperty(arguments, "0", {
+ value: 10,
+ writable: false,
+ enumerable: false,
+ configurable: false
+ });
+ assertEquals(10, arguments[0]);
+ }(0, 1, 2));
+}
+
+testcase();
diff --git a/deps/v8/test/mjsunit/regress/regress-1521.js b/deps/v8/test/mjsunit/regress/regress-1521.js
new file mode 100644
index 000000000..415db6780
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1521.js
@@ -0,0 +1,47 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Optimized variable access through a catch context should work.
+function test(x) {
+ try {
+ throw new Error();
+ } catch (e) {
+ var y = {f: 1};
+ var f = function () {
+ var z = y;
+ var g = function () {
+ if (y.f === z.f) return x;
+ };
+ %OptimizeFunctionOnNextCall(g);
+ return g;
+ }
+ assertEquals(3, f()());
+ }
+}
+
+test(3);
+
diff --git a/deps/v8/test/mjsunit/regress/regress-1528.js b/deps/v8/test/mjsunit/regress/regress-1528.js
new file mode 100644
index 000000000..2eb6be194
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1528.js
@@ -0,0 +1,40 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// With (or catch) scopes nested inside catch scopes should look at the
+// first outer non-catch scope to decide which closure to use when
+// allocating the new context.
+
+// Code below should not assert or crash.
+try {
+ fail;
+} catch (e) {
+ with({}) { // With scope inside catch scope.
+ // Dynamic declaration forces runtime lookup to observe the context chain.
+ eval('const x = 7');
+ }
+}
diff --git a/deps/v8/test/sputnik/README b/deps/v8/test/sputnik/README
index 50d721f36..36566340e 100644
--- a/deps/v8/test/sputnik/README
+++ b/deps/v8/test/sputnik/README
@@ -1,6 +1,6 @@
To run the sputniktests you must check out the test suite from
googlecode.com. The test expectations are currently relative to
-version 94. To get the tests run the following command within
+version 97. To get the tests run the following command within
v8/test/sputnik/
- svn co http://sputniktests.googlecode.com/svn/trunk/ -r94 sputniktests
+ svn co http://sputniktests.googlecode.com/svn/trunk/ -r97 sputniktests
diff --git a/deps/v8/test/sputnik/sputnik.status b/deps/v8/test/sputnik/sputnik.status
index 84c8cb261..82d8a61c7 100644
--- a/deps/v8/test/sputnik/sputnik.status
+++ b/deps/v8/test/sputnik/sputnik.status
@@ -28,6 +28,15 @@
prefix sputnik
def FAIL_OK = FAIL, OKAY
+############################### BUGS ###################################
+
+# A bound function should fail on access to 'caller' and 'arguments'.
+S15.3.4.5_A1: FAIL
+S15.3.4.5_A2: FAIL
+
+# '__proto__' should be treated as a normal property in JSON.
+S15.12.2_A1: FAIL
+
##################### DELIBERATE INCOMPATIBILITIES #####################
# This tests precision of trignometric functions. We're slightly off
@@ -40,6 +49,7 @@ S15.8.2.13_A23: PASS || FAIL_OK
# We allow calls to regexp exec() with no arguments to fail for
# compatibility reasons.
S15.10.6.2_A1_T16: FAIL_OK
+S15.10.6.2_A12: FAIL_OK
S15.10.6.3_A1_T16: FAIL_OK
# We are silent in some regexp cases where the spec wants us to give
@@ -126,16 +136,8 @@ S15.5.4.11_D1.1_T3: PASS || FAIL_OK
S12.6.4_D1: PASS || FAIL_OK
# We allow function declarations within statements
-S12.5_A9_T1: FAIL_OK
-S12.5_A9_T2: FAIL_OK
-# S12.6.2_A13_T3: FAIL_OK
-# S12.5_A9_T3: FAIL_OK
-# S12.6.1_A13_T3: FAIL_OK
-S12.1_A1: FAIL_OK
S12.6.2_A13_T1: FAIL_OK
S12.6.2_A13_T2: FAIL_OK
-S12.6.1_A13_T1: FAIL_OK
-S12.6.1_A13_T2: FAIL_OK
S12.6.4_A13_T1: FAIL_OK
S12.6.4_A13_T2: FAIL_OK
#S12.6.4_A13_T3: FAIL_OK
diff --git a/deps/v8/tools/gdb-v8-support.py b/deps/v8/tools/gdb-v8-support.py
new file mode 100644
index 000000000..0aa1d4946
--- /dev/null
+++ b/deps/v8/tools/gdb-v8-support.py
@@ -0,0 +1,154 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+kSmiTag = 0
+kSmiTagSize = 1
+kSmiTagMask = (1 << kSmiTagSize) - 1
+
+
+kHeapObjectTag = 1
+kHeapObjectTagSize = 2
+kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1
+
+
+kFailureTag = 3
+kFailureTagSize = 2
+kFailureTagMask = (1 << kFailureTagSize) - 1
+
+
+kSmiShiftSize32 = 0
+kSmiValueSize32 = 31
+kSmiShiftBits32 = kSmiTagSize + kSmiShiftSize32
+
+
+kSmiShiftSize64 = 31
+kSmiValueSize64 = 32
+kSmiShiftBits64 = kSmiTagSize + kSmiShiftSize64
+
+
+kAllBits = 0xFFFFFFFF
+kTopBit32 = 0x80000000
+kTopBit64 = 0x8000000000000000
+
+
+t_u32 = gdb.lookup_type('unsigned int')
+t_u64 = gdb.lookup_type('unsigned long long')
+
+
+def has_smi_tag(v):
+ return v & kSmiTagMask == kSmiTag
+
+
+def has_failure_tag(v):
+ return v & kFailureTagMask == kFailureTag
+
+
+def has_heap_object_tag(v):
+ return v & kHeapObjectTagMask == kHeapObjectTag
+
+
+def raw_heap_object(v):
+ return v - kHeapObjectTag
+
+
+def smi_to_int_32(v):
+ v = v & kAllBits
+ if (v & kTopBit32) == kTopBit32:
+ return ((v & kAllBits) >> kSmiShiftBits32) - 2147483648
+ else:
+ return (v & kAllBits) >> kSmiShiftBits32
+
+
+def smi_to_int_64(v):
+ return (v >> kSmiShiftBits64)
+
+
+def decode_v8_value(v, bitness):
+ base_str = 'v8[%x]' % v
+ if has_smi_tag(v):
+ if bitness == 32:
+ return base_str + (" SMI(%d)" % smi_to_int_32(v))
+ else:
+ return base_str + (" SMI(%d)" % smi_to_int_64(v))
+ elif has_failure_tag(v):
+ return base_str + " (failure)"
+ elif has_heap_object_tag(v):
+ return base_str + (" H(0x%x)" % raw_heap_object(v))
+ else:
+ return base_str
+
+
+class V8ValuePrinter(object):
+ "Print a v8value."
+ def __init__(self, val):
+ self.val = val
+ def to_string(self):
+ if self.val.type.sizeof == 4:
+ v_u32 = self.val.cast(t_u32)
+ return decode_v8_value(int(v_u32), 32)
+ elif self.val.type.sizeof == 8:
+ v_u64 = self.val.cast(t_u64)
+ return decode_v8_value(int(v_u64), 64)
+ else:
+ return 'v8value?'
+ def display_hint(self):
+ return 'v8value'
+
+
+def v8_pretty_printers(val):
+ lookup_tag = val.type.tag
+ if lookup_tag == None:
+ return None
+ elif lookup_tag == 'v8value':
+ return V8ValuePrinter(val)
+ return None
+gdb.pretty_printers.append(v8_pretty_printers)
+
+
+def v8_to_int(v):
+ if v.type.sizeof == 4:
+ return int(v.cast(t_u32))
+ elif v.type.sizeof == 8:
+ return int(v.cast(t_u64))
+ else:
+ return '?'
+
+
+def v8_get_value(vstring):
+ v = gdb.parse_and_eval(vstring)
+ return v8_to_int(v)
+
+
+class V8PrintObject (gdb.Command):
+ """Prints a v8 object."""
+ def __init__ (self):
+ super (V8PrintObject, self).__init__ ("v8print", gdb.COMMAND_DATA)
+ def invoke (self, arg, from_tty):
+ v = v8_get_value(arg)
+ gdb.execute('call __gdb_print_v8_object(%d)' % v)
+V8PrintObject()
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index 313ee72a5..74521ad21 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -373,64 +373,64 @@ class MinidumpReader(object):
# };
# static P p;
INSTANCE_TYPES = {
- 64: "SYMBOL_TYPE",
- 68: "ASCII_SYMBOL_TYPE",
- 65: "CONS_SYMBOL_TYPE",
- 69: "CONS_ASCII_SYMBOL_TYPE",
- 66: "EXTERNAL_SYMBOL_TYPE",
- 74: "EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE",
- 70: "EXTERNAL_ASCII_SYMBOL_TYPE",
- 0: "STRING_TYPE",
- 4: "ASCII_STRING_TYPE",
- 1: "CONS_STRING_TYPE",
- 5: "CONS_ASCII_STRING_TYPE",
- 2: "EXTERNAL_STRING_TYPE",
- 10: "EXTERNAL_STRING_WITH_ASCII_DATA_TYPE",
- 6: "EXTERNAL_ASCII_STRING_TYPE",
- 6: "PRIVATE_EXTERNAL_ASCII_STRING_TYPE",
- 128: "MAP_TYPE",
- 129: "CODE_TYPE",
- 130: "ODDBALL_TYPE",
- 131: "JS_GLOBAL_PROPERTY_CELL_TYPE",
- 132: "HEAP_NUMBER_TYPE",
- 133: "FOREIGN_TYPE",
- 134: "BYTE_ARRAY_TYPE",
- 135: "EXTERNAL_BYTE_ARRAY_TYPE",
- 136: "EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE",
- 137: "EXTERNAL_SHORT_ARRAY_TYPE",
- 138: "EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE",
- 139: "EXTERNAL_INT_ARRAY_TYPE",
- 140: "EXTERNAL_UNSIGNED_INT_ARRAY_TYPE",
- 141: "EXTERNAL_FLOAT_ARRAY_TYPE",
- 142: "EXTERNAL_DOUBLE_ARRAY_TYPE",
- 143: "EXTERNAL_PIXEL_ARRAY_TYPE",
- 144: "FILLER_TYPE",
- 145: "ACCESSOR_INFO_TYPE",
- 146: "ACCESS_CHECK_INFO_TYPE",
- 147: "INTERCEPTOR_INFO_TYPE",
- 148: "CALL_HANDLER_INFO_TYPE",
- 149: "FUNCTION_TEMPLATE_INFO_TYPE",
- 150: "OBJECT_TEMPLATE_INFO_TYPE",
- 151: "SIGNATURE_INFO_TYPE",
- 152: "TYPE_SWITCH_INFO_TYPE",
- 153: "SCRIPT_TYPE",
- 154: "CODE_CACHE_TYPE",
- 155: "DEBUG_INFO_TYPE",
- 156: "BREAK_POINT_INFO_TYPE",
- 157: "FIXED_ARRAY_TYPE",
- 158: "SHARED_FUNCTION_INFO_TYPE",
- 159: "JS_MESSAGE_OBJECT_TYPE",
- 160: "JS_VALUE_TYPE",
- 161: "JS_OBJECT_TYPE",
- 162: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 163: "JS_GLOBAL_OBJECT_TYPE",
- 164: "JS_BUILTINS_OBJECT_TYPE",
- 165: "JS_GLOBAL_PROXY_TYPE",
- 166: "JS_ARRAY_TYPE",
- 167: "JS_PROXY_TYPE",
- 168: "JS_REGEXP_TYPE",
- 169: "JS_FUNCTION_TYPE",
- 170: "JS_FUNCTION_PROXY_TYPE",
+64: "SYMBOL_TYPE",
+68: "ASCII_SYMBOL_TYPE",
+65: "CONS_SYMBOL_TYPE",
+69: "CONS_ASCII_SYMBOL_TYPE",
+66: "EXTERNAL_SYMBOL_TYPE",
+74: "EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE",
+70: "EXTERNAL_ASCII_SYMBOL_TYPE",
+0: "STRING_TYPE",
+4: "ASCII_STRING_TYPE",
+1: "CONS_STRING_TYPE",
+5: "CONS_ASCII_STRING_TYPE",
+2: "EXTERNAL_STRING_TYPE",
+10: "EXTERNAL_STRING_WITH_ASCII_DATA_TYPE",
+6: "EXTERNAL_ASCII_STRING_TYPE",
+6: "PRIVATE_EXTERNAL_ASCII_STRING_TYPE",
+128: "MAP_TYPE",
+129: "CODE_TYPE",
+130: "ODDBALL_TYPE",
+131: "JS_GLOBAL_PROPERTY_CELL_TYPE",
+132: "HEAP_NUMBER_TYPE",
+133: "FOREIGN_TYPE",
+134: "BYTE_ARRAY_TYPE",
+135: "EXTERNAL_BYTE_ARRAY_TYPE",
+136: "EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE",
+137: "EXTERNAL_SHORT_ARRAY_TYPE",
+138: "EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE",
+139: "EXTERNAL_INT_ARRAY_TYPE",
+140: "EXTERNAL_UNSIGNED_INT_ARRAY_TYPE",
+141: "EXTERNAL_FLOAT_ARRAY_TYPE",
+143: "EXTERNAL_PIXEL_ARRAY_TYPE",
+145: "FILLER_TYPE",
+146: "ACCESSOR_INFO_TYPE",
+147: "ACCESS_CHECK_INFO_TYPE",
+148: "INTERCEPTOR_INFO_TYPE",
+149: "CALL_HANDLER_INFO_TYPE",
+150: "FUNCTION_TEMPLATE_INFO_TYPE",
+151: "OBJECT_TEMPLATE_INFO_TYPE",
+152: "SIGNATURE_INFO_TYPE",
+153: "TYPE_SWITCH_INFO_TYPE",
+154: "SCRIPT_TYPE",
+155: "CODE_CACHE_TYPE",
+156: "POLYMORPHIC_CODE_CACHE_TYPE",
+159: "FIXED_ARRAY_TYPE",
+160: "SHARED_FUNCTION_INFO_TYPE",
+161: "JS_MESSAGE_OBJECT_TYPE",
+162: "JS_VALUE_TYPE",
+163: "JS_OBJECT_TYPE",
+164: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+165: "JS_GLOBAL_OBJECT_TYPE",
+166: "JS_BUILTINS_OBJECT_TYPE",
+167: "JS_GLOBAL_PROXY_TYPE",
+168: "JS_ARRAY_TYPE",
+169: "JS_PROXY_TYPE",
+170: "JS_REGEXP_TYPE",
+171: "JS_FUNCTION_TYPE",
+172: "JS_FUNCTION_PROXY_TYPE",
+157: "DEBUG_INFO_TYPE",
+158: "BREAK_POINT_INFO_TYPE",
}
diff --git a/deps/v8/tools/ll_prof.py b/deps/v8/tools/ll_prof.py
index 798b57493..58cbb9585 100755
--- a/deps/v8/tools/ll_prof.py
+++ b/deps/v8/tools/ll_prof.py
@@ -631,10 +631,10 @@ class TraceReader(object):
def ReadMmap(self, header, offset):
mmap_info = PERF_MMAP_EVENT_BODY_DESC.Read(self.trace,
offset + self.header_size)
- # Read null-padded filename.
+ # Read null-terminated filename.
filename = self.trace[offset + self.header_size + ctypes.sizeof(mmap_info):
- offset + header.size].rstrip(chr(0))
- mmap_info.filename = filename
+ offset + header.size]
+ mmap_info.filename = filename[:filename.find(chr(0))]
return mmap_info
def ReadSample(self, header, offset):