author    Ryan Dahl <ry@tinyclouds.org>  2010-02-03 09:06:03 -0800
committer Ryan Dahl <ry@tinyclouds.org>  2010-02-03 09:07:02 -0800
commit    c7cb4daa25966e4f9af3c6d5499d762736454da9 (patch)
tree      27c6541f5a1207eb74797ed63a43126c9bf2ba81 /deps
parent    c723acc72192334a62bea6ff4baa33aab0da50ad (diff)
download  node-c7cb4daa25966e4f9af3c6d5499d762736454da9.tar.gz
Upgrade V8 to 2.1.0
Diffstat (limited to 'deps')
-rw-r--r--  deps/v8/AUTHORS | 1
-rw-r--r--  deps/v8/ChangeLog | 29
-rw-r--r--  deps/v8/SConstruct | 24
-rw-r--r--  deps/v8/include/v8-debug.h | 41
-rw-r--r--  deps/v8/samples/lineprocessor.cc | 425
-rwxr-xr-x  deps/v8/src/SConscript | 7
-rw-r--r--  deps/v8/src/accessors.cc | 6
-rw-r--r--  deps/v8/src/api.cc | 10
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h | 14
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 36
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 16
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc | 61
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 949
-rw-r--r--  deps/v8/src/arm/codegen-arm.h | 120
-rw-r--r--  deps/v8/src/arm/constants-arm.h | 6
-rw-r--r--  deps/v8/src/arm/debug-arm.cc | 2
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc | 52
-rw-r--r--  deps/v8/src/arm/fast-codegen-arm.cc | 1742
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 1781
-rw-r--r--  deps/v8/src/arm/ic-arm.cc | 25
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 63
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 19
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc | 110
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.h | 6
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 103
-rw-r--r--  deps/v8/src/arm/simulator-arm.h | 8
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 551
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.cc | 101
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.h | 34
-rw-r--r--  deps/v8/src/assembler.cc | 20
-rw-r--r--  deps/v8/src/assembler.h | 5
-rw-r--r--  deps/v8/src/ast.cc | 21
-rw-r--r--  deps/v8/src/ast.h | 35
-rw-r--r--  deps/v8/src/bootstrapper.cc | 20
-rw-r--r--  deps/v8/src/builtins.cc | 218
-rw-r--r--  deps/v8/src/builtins.h | 48
-rw-r--r--  deps/v8/src/code-stubs.h | 4
-rw-r--r--  deps/v8/src/codegen.cc | 43
-rw-r--r--  deps/v8/src/codegen.h | 128
-rwxr-xr-x [-rw-r--r--]  deps/v8/src/compiler.cc | 735
-rw-r--r--  deps/v8/src/compiler.h | 37
-rw-r--r--  deps/v8/src/data-flow.cc | 267
-rw-r--r--  deps/v8/src/data-flow.h | 67
-rw-r--r--  deps/v8/src/dateparser.cc | 32
-rw-r--r--  deps/v8/src/debug-agent.cc | 4
-rw-r--r--  deps/v8/src/debug-delay.js | 2
-rw-r--r--  deps/v8/src/debug.cc | 110
-rw-r--r--  deps/v8/src/debug.h | 37
-rw-r--r--  deps/v8/src/disassembler.cc | 8
-rw-r--r--  deps/v8/src/execution.cc | 18
-rw-r--r--  deps/v8/src/execution.h | 1
-rw-r--r--  deps/v8/src/factory.cc | 8
-rw-r--r--  deps/v8/src/factory.h | 1
-rw-r--r--  deps/v8/src/fast-codegen.cc | 947
-rw-r--r--  deps/v8/src/fast-codegen.h | 338
-rw-r--r--  deps/v8/src/flag-definitions.h | 20
-rw-r--r--  deps/v8/src/frames.cc | 14
-rw-r--r--  deps/v8/src/frames.h | 6
-rw-r--r--  deps/v8/src/full-codegen.cc | 1155
-rw-r--r--  deps/v8/src/full-codegen.h | 452
-rw-r--r--  deps/v8/src/globals.h | 7
-rw-r--r--  deps/v8/src/handles.cc | 44
-rw-r--r--  deps/v8/src/handles.h | 15
-rw-r--r--  deps/v8/src/heap-inl.h | 6
-rw-r--r--  deps/v8/src/heap-profiler.cc | 3
-rw-r--r--  deps/v8/src/heap.cc | 112
-rw-r--r--  deps/v8/src/heap.h | 5
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 26
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 8
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 65
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 2021
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h | 154
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc | 9
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc | 13
-rw-r--r--  deps/v8/src/ia32/fast-codegen-ia32.cc | 1714
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 1900
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 33
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 99
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 15
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 75
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.h | 6
-rw-r--r--  deps/v8/src/ia32/simulator-ia32.h | 4
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 144
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.cc | 36
-rw-r--r--  deps/v8/src/ic.cc | 30
-rw-r--r--  deps/v8/src/ic.h | 13
-rw-r--r--  deps/v8/src/json-delay.js | 2
-rw-r--r--  deps/v8/src/jsregexp.cc | 5
-rw-r--r--  deps/v8/src/list.h | 3
-rw-r--r--  deps/v8/src/log.cc | 122
-rw-r--r--  deps/v8/src/log.h | 23
-rw-r--r--  deps/v8/src/macros.py | 1
-rw-r--r--  deps/v8/src/mark-compact.cc | 76
-rw-r--r--  deps/v8/src/mark-compact.h | 3
-rw-r--r--  deps/v8/src/messages.js | 3
-rw-r--r--  deps/v8/src/mirror-delay.js | 18
-rw-r--r--  deps/v8/src/mksnapshot.cc | 6
-rw-r--r--  deps/v8/src/objects-inl.h | 21
-rw-r--r--  deps/v8/src/objects.cc | 81
-rw-r--r--  deps/v8/src/objects.h | 32
-rw-r--r--  deps/v8/src/parser.cc | 372
-rw-r--r--  deps/v8/src/parser.h | 3
-rw-r--r--  deps/v8/src/platform-freebsd.cc | 24
-rw-r--r--  deps/v8/src/platform-linux.cc | 30
-rw-r--r--  deps/v8/src/platform-macos.cc | 12
-rw-r--r--  deps/v8/src/platform-openbsd.cc | 6
-rw-r--r--  deps/v8/src/platform-posix.cc | 7
-rw-r--r--  deps/v8/src/platform-solaris.cc | 131
-rw-r--r--  deps/v8/src/platform-win32.cc | 12
-rw-r--r--  deps/v8/src/platform.h | 35
-rw-r--r--  deps/v8/src/prettyprinter.cc | 55
-rw-r--r--  deps/v8/src/prettyprinter.h | 3
-rw-r--r--  deps/v8/src/regexp-macro-assembler.cc | 38
-rw-r--r--  deps/v8/src/regexp-macro-assembler.h | 12
-rw-r--r--  deps/v8/src/runtime.cc | 448
-rw-r--r--  deps/v8/src/runtime.h | 15
-rw-r--r--  deps/v8/src/runtime.js | 4
-rwxr-xr-x [-rw-r--r--]  deps/v8/src/scanner.cc | 211
-rw-r--r--  deps/v8/src/scanner.h | 58
-rw-r--r--  deps/v8/src/serialize.cc | 391
-rw-r--r--  deps/v8/src/serialize.h | 169
-rw-r--r--  deps/v8/src/snapshot-common.cc | 35
-rw-r--r--  deps/v8/src/spaces-inl.h | 26
-rw-r--r--  deps/v8/src/spaces.cc | 76
-rw-r--r--  deps/v8/src/spaces.h | 81
-rw-r--r--  deps/v8/src/stub-cache.cc | 13
-rw-r--r--  deps/v8/src/stub-cache.h | 4
-rw-r--r--  deps/v8/src/v8-counters.h | 72
-rw-r--r--  deps/v8/src/v8natives.js | 59
-rw-r--r--  deps/v8/src/version.cc | 6
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 58
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 16
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 66
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 2390
-rw-r--r--  deps/v8/src/x64/codegen-x64.h | 223
-rw-r--r--  deps/v8/src/x64/debug-x64.cc | 9
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc | 30
-rw-r--r--  deps/v8/src/x64/fast-codegen-x64.cc | 1730
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 1907
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 37
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 177
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 52
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc | 142
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.h | 29
-rw-r--r--  deps/v8/src/x64/simulator-x64.h | 4
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 224
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.cc | 38
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.h | 2
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 248
-rw-r--r--  deps/v8/test/cctest/test-assembler-arm.cc | 61
-rw-r--r--  deps/v8/test/cctest/test-compiler.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-debug.cc | 407
-rw-r--r--  deps/v8/test/cctest/test-disasm-ia32.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-heap-profiler.cc | 8
-rw-r--r--  deps/v8/test/cctest/test-heap.cc | 5
-rw-r--r--  deps/v8/test/cctest/test-log-stack-tracer.cc | 7
-rw-r--r--  deps/v8/test/cctest/test-log.cc | 7
-rw-r--r--  deps/v8/test/cctest/test-mark-compact.cc | 30
-rw-r--r--  deps/v8/test/cctest/test-regexp.cc | 205
-rw-r--r--  deps/v8/test/cctest/test-serialize.cc | 210
-rw-r--r--  deps/v8/test/es5conform/README | 2
-rw-r--r--  deps/v8/test/es5conform/es5conform.status | 112
-rw-r--r--  deps/v8/test/mjsunit/compiler/short-circuit.js | 102
-rw-r--r--  deps/v8/test/mjsunit/compiler/thisfunction.js | 2
-rw-r--r--  deps/v8/test/mjsunit/compiler/unary-add.js | 67
-rw-r--r--  deps/v8/test/mjsunit/debug-compile-event-newfunction.js | 68
-rw-r--r--  deps/v8/test/mjsunit/debug-compile-event.js | 2
-rw-r--r--  deps/v8/test/mjsunit/debug-step.js | 2
-rw-r--r--  deps/v8/test/mjsunit/for.js (renamed from deps/v8/test/mjsunit/bugs/bug-223.js) | 19
-rw-r--r--  deps/v8/test/mjsunit/json.js | 98
-rw-r--r--  deps/v8/test/mjsunit/math-round.js | 52
-rw-r--r--  deps/v8/test/mjsunit/mirror-script.js | 4
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.js | 1
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.status | 4
-rw-r--r--  deps/v8/test/mjsunit/object-get-own-property-names.js | 104
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-580.js | 55
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-3184.js | 83
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-3867.js | 77
-rw-r--r--  deps/v8/test/mjsunit/tools/csvparser.js | 4
-rw-r--r--  deps/v8/test/mjsunit/tools/logreader.js | 2
-rw-r--r--  deps/v8/test/mjsunit/tools/tickprocessor-test.func-info | 29
-rw-r--r--  deps/v8/test/mjsunit/tools/tickprocessor-test.log | 27
-rw-r--r--  deps/v8/test/mjsunit/tools/tickprocessor.js | 7
-rw-r--r--  deps/v8/test/mjsunit/value-wrapper.js | 164
-rw-r--r--  deps/v8/tools/codemap.js | 12
-rw-r--r--  deps/v8/tools/csvparser.js | 59
-rw-r--r--  deps/v8/tools/gyp/v8.gyp | 7
-rw-r--r--  deps/v8/tools/logreader.js | 5
-rw-r--r--  deps/v8/tools/profile.js | 70
-rwxr-xr-x  deps/v8/tools/test.py | 14
-rw-r--r--  deps/v8/tools/tickprocessor-driver.js | 8
-rw-r--r--  deps/v8/tools/tickprocessor.js | 136
-rw-r--r--  deps/v8/tools/tickprocessor.py | 44
-rw-r--r--  deps/v8/tools/visual_studio/common.vsprops | 1
-rw-r--r--  deps/v8/tools/visual_studio/d8.vcproj | 6
-rw-r--r--  deps/v8/tools/visual_studio/d8_arm.vcproj | 392
-rw-r--r--  deps/v8/tools/visual_studio/d8_x64.vcproj | 24
-rw-r--r--  deps/v8/tools/visual_studio/ia32.vsprops | 4
-rw-r--r--  deps/v8/tools/visual_studio/v8_arm.vcproj | 446
-rw-r--r--  deps/v8/tools/visual_studio/v8_base.vcproj | 20
-rw-r--r--  deps/v8/tools/visual_studio/v8_base_arm.vcproj | 20
-rw-r--r--  deps/v8/tools/visual_studio/v8_base_x64.vcproj | 33
-rw-r--r--  deps/v8/tools/visual_studio/v8_cctest.vcproj | 6
-rw-r--r--  deps/v8/tools/visual_studio/v8_cctest_arm.vcproj | 6
-rw-r--r--  deps/v8/tools/visual_studio/v8_cctest_x64.vcproj | 12
-rw-r--r--  deps/v8/tools/visual_studio/v8_mksnapshot.vcproj | 6
-rw-r--r--  deps/v8/tools/visual_studio/v8_mksnapshot_x64.vcproj | 6
-rw-r--r--  deps/v8/tools/visual_studio/v8_process_sample.vcproj | 6
-rw-r--r--  deps/v8/tools/visual_studio/v8_process_sample_arm.vcproj | 296
-rw-r--r--  deps/v8/tools/visual_studio/v8_process_sample_x64.vcproj | 22
-rw-r--r--  deps/v8/tools/visual_studio/v8_shell_sample.vcproj | 6
-rw-r--r--  deps/v8/tools/visual_studio/v8_shell_sample_arm.vcproj | 296
-rw-r--r--  deps/v8/tools/visual_studio/v8_shell_sample_x64.vcproj | 24
-rw-r--r--  deps/v8/tools/visual_studio/x64.vsprops | 4
214 files changed, 20083 insertions(+), 11827 deletions(-)
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index af0ecded7..5d712fc27 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -10,6 +10,7 @@ Alexandre Vassalotti <avassalotti@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel James <dnljms@gmail.com>
+Erich Ocean <erich.ocean@me.com>
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
Joel Stanley <joel.stan@gmail.com>
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 192dd2500..29ecccd7d 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,32 @@
+2010-02-03: Version 2.1.0
+
+ Values are now always wrapped in objects when used as a receiver
+ (issue 223).
+
+ [ES5] Implemented Object.getOwnPropertyNames.
+
+ [ES5] Restrict JSON.parse to only accept strings that conform to the
+ JSON grammar.
+
+ Improvements to the debugger agent (issues 549 and 554).
+
+ Fixed problem with skipped stack frame in profiles (issue 553).
+
+ Solaris support by Erich Ocean <erich.ocean@me.com> and Ryan Dahl
+ <ry@tinyclouds.org>.
+
+ Fixed a bug where Math.round() returned incorrect results for huge
+ integers.
+
+ Fixed enumeration order for objects created from some constructor
+ functions (issue http://crbug.com/3867).
+
+ Fixed arithmetic on some integer constants (issue 580).
+
+ Numerous performance improvements, including porting of previous IA-32
+ optimizations to the x64 and ARM architectures.
+
+
2010-01-14: Version 2.0.6
Added ES5 Object.getPrototypeOf, GetOwnPropertyDescriptor,
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index d68cec7c5..98fc22fba 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -280,18 +280,12 @@ V8_EXTRA_FLAGS = {
},
'msvc': {
'all': {
- 'WARNINGFLAGS': ['/WX', '/wd4355', '/wd4800']
+ 'WARNINGFLAGS': ['/W3', '/WX', '/wd4355', '/wd4800']
},
'library:shared': {
'CPPDEFINES': ['BUILDING_V8_SHARED'],
'LIBS': ['winmm', 'ws2_32']
},
- 'arch:ia32': {
- 'WARNINGFLAGS': ['/W3']
- },
- 'arch:x64': {
- 'WARNINGFLAGS': ['/W3']
- },
'arch:arm': {
'CPPDEFINES': ['V8_TARGET_ARCH_ARM'],
# /wd4996 is to silence the warning about sscanf
@@ -317,7 +311,8 @@ MKSNAPSHOT_EXTRA_FLAGS = {
'LIBS': ['execinfo', 'pthread']
},
'os:solaris': {
- 'LIBS': ['pthread', 'socket', 'nsl', 'rt']
+ 'LIBS': ['m', 'pthread', 'socket', 'nsl', 'rt'],
+ 'LINKFLAGS': ['-mt']
},
'os:openbsd': {
'LIBS': ['execinfo', 'pthread']
@@ -369,7 +364,8 @@ CCTEST_EXTRA_FLAGS = {
'LIBS': ['execinfo', 'pthread']
},
'os:solaris': {
- 'LIBS': ['pthread', 'socket', 'nsl', 'rt']
+ 'LIBS': ['m', 'pthread', 'socket', 'nsl', 'rt'],
+ 'LINKFLAGS': ['-mt']
},
'os:openbsd': {
'LIBS': ['execinfo', 'pthread']
@@ -431,7 +427,8 @@ SAMPLE_FLAGS = {
},
'os:solaris': {
'LIBPATH' : ['/usr/local/lib'],
- 'LIBS': ['pthread', 'socket', 'nsl', 'rt']
+ 'LIBS': ['m', 'pthread', 'socket', 'nsl', 'rt'],
+ 'LINKFLAGS': ['-mt']
},
'os:openbsd': {
'LIBPATH' : ['/usr/local/lib'],
@@ -543,7 +540,8 @@ D8_FLAGS = {
'LIBS': ['pthread'],
},
'os:solaris': {
- 'LIBS': ['pthread', 'socket', 'nsl', 'rt']
+ 'LIBS': ['m', 'pthread', 'socket', 'nsl', 'rt'],
+ 'LINKFLAGS': ['-mt']
},
'os:openbsd': {
'LIBS': ['pthread'],
@@ -693,7 +691,7 @@ SIMPLE_OPTIONS = {
def GetOptions():
result = Options()
result.Add('mode', 'compilation mode (debug, release)', 'release')
- result.Add('sample', 'build sample (shell, process)', '')
+ result.Add('sample', 'build sample (shell, process, lineprocessor)', '')
result.Add('env', 'override environment settings (NAME0:value0,NAME1:value1,...)', '')
result.Add('importenv', 'import environment settings (NAME0,NAME1,...)', '')
for (name, option) in SIMPLE_OPTIONS.iteritems():
@@ -761,7 +759,7 @@ def IsLegal(env, option, values):
def VerifyOptions(env):
if not IsLegal(env, 'mode', ['debug', 'release']):
return False
- if not IsLegal(env, 'sample', ["shell", "process"]):
+ if not IsLegal(env, 'sample', ["shell", "process", "lineprocessor"]):
return False
if not IsLegal(env, 'regexp', ["native", "interpreted"]):
return False
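
With the option changes above, the new sample builds the same way as the existing ones; an illustrative invocation, assuming the usual V8 SCons workflow:

  scons mode=release sample=lineprocessor

On Solaris, the 'os:solaris' blocks above automatically add the '-mt' link flag and the extra 'm', 'socket', 'nsl', and 'rt' libraries.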
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
index 10b41e236..2e5fb3fde 100644
--- a/deps/v8/include/v8-debug.h
+++ b/deps/v8/include/v8-debug.h
@@ -224,9 +224,11 @@ class EXPORT Debug {
* be processed. Note that debug messages will only be processed if there is
* a V8 break. This can happen automatically by using the option
* --debugger-auto-break.
+ * \param provide_locker requires that V8 acquire v8::Locker for you before
+ * calling the handler.
*/
static void SetDebugMessageDispatchHandler(
- DebugMessageDispatchHandler handler);
+ DebugMessageDispatchHandler handler, bool provide_locker = false);
/**
* Run a JavaScript function in the debugger.
@@ -263,6 +265,43 @@ class EXPORT Debug {
*/
static bool EnableAgent(const char* name, int port,
bool wait_for_connection = false);
+
+ /**
+ * Makes V8 process all pending debug messages.
+ *
+ * From V8's point of view all debug messages arrive asynchronously (e.g. from
+ * a remote debugger), but they all must be handled synchronously: V8 cannot
+ * do two things at once, so normal script execution must be interrupted
+ * for a while.
+ *
+ * Generally, when a message arrives, V8 may be in one of three states:
+ * 1. V8 is running a script; V8 will automatically interrupt and process all
+ * pending messages (provided the auto_break flag is enabled);
+ * 2. V8 is suspended on a debug breakpoint; in this state V8 is dedicated
+ * to reading and processing debug messages;
+ * 3. V8 is not running at all, or has called some long-running C++ function;
+ * by default this means that processing of all debug messages will be
+ * deferred until V8 regains control; however, the embedding application may
+ * improve on this by calling this method manually.
+ *
+ * It makes sense to call this method whenever a new debug message has arrived
+ * and V8 is not already running. The method
+ * v8::Debug::SetDebugMessageDispatchHandler should help with the former
+ * condition.
+ *
+ * Technically this method is in many senses equivalent to executing an empty
+ * script:
+ * 1. It does nothing except process all pending debug messages.
+ * 2. It should be invoked with the same precautions and from the same context
+ * as a V8 script would be invoked from, because:
+ * a. with the "evaluate" command it can do whatever a normal script can do,
+ * including all native calls;
+ * b. no other thread should call into V8 while this method is running
+ * (v8::Locker may be used here).
+ *
+ * The behavior of the "evaluate" debug command is currently not specified in
+ * the scope of this method.
+ */
+ static void ProcessDebugMessages();
};
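
Taken together, the new provide_locker argument and ProcessDebugMessages() let an embedder drain debugger traffic from an arbitrary thread. A minimal sketch of the intended pattern, with illustrative names and a single assumed persistent context (the lineprocessor sample added below exercises the same pattern in full):

// Registered with provide_locker == true, so V8 has already acquired
// v8::Locker on this (arbitrary) thread by the time we run.
static v8::Persistent<v8::Context> the_context;  // assumed set up elsewhere

static void OnDebugMessage() {
  // Choose a context for the "evaluate" command, then drain all
  // pending debug messages.
  v8::Context::Scope scope(the_context);
  v8::Debug::ProcessDebugMessages();
}

// During initialization:
//   v8::Debug::SetDebugMessageDispatchHandler(OnDebugMessage, true);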
diff --git a/deps/v8/samples/lineprocessor.cc b/deps/v8/samples/lineprocessor.cc
new file mode 100644
index 000000000..505dabf94
--- /dev/null
+++ b/deps/v8/samples/lineprocessor.cc
@@ -0,0 +1,425 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <v8.h>
+#include <v8-debug.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+/**
+ * This sample program demonstrates certain aspects of debugging a
+ * standalone V8-based application.
+ *
+ * The program reads the input stream, processes it line by line, and prints
+ * the result to the output. The actual processing is done by a custom
+ * JavaScript script, which is specified with command line parameters.
+ *
+ * The main cycle of the program sequentially reads lines from standard
+ * input, processes them, and prints them to standard output until the input
+ * closes. There are two possible configurations of the main cycle.
+ *
+ * 1. The main cycle is on the C++ side. The program should be run with the
+ * --main-cycle-in-cpp option. The script must declare a function named
+ * "ProcessLine". The main cycle in C++ reads lines and calls this function
+ * to process each one. This is a sample script:
+
+function ProcessLine(input_line) {
+  return ">>>" + input_line + "<<<";
+}
+
+ *
+ * 2. The main cycle is in JavaScript. The program should be run with the
+ * --main-cycle-in-js option. The script is run only once and is given an
+ * API of two global functions: "read_line" and "print". It should read the
+ * input and print the converted lines to the output itself. This is a
+ * sample script:
+
+while (true) {
+  var line = read_line();
+  if (!line) {
+    break;
+  }
+  var res = line + " | " + line;
+  print(res);
+}
+
+ *
+ * When run with the "-p" argument, the program starts the V8 Debugger Agent
+ * and allows a remote debugger to attach and debug JavaScript code.
+ *
+ * Interesting aspects:
+ * 1. Waiting for a remote debugger to attach.
+ * Normally the program compiles the custom script and immediately runs it.
+ * To debug the script from the very beginning, run this sample program with
+ * the "--wait-for-connection" command line parameter. This way V8 will
+ * suspend on the first statement and wait for the debugger to attach.
+ *
+ * 2. Unresponsive V8.
+ * The V8 Debugger Agent holds a connection with the remote debugger, but it
+ * only responds when V8 is running some script. In particular, when this
+ * program is waiting for input, all requests from the debugger are deferred
+ * until V8 is called again. See how the "--callback" command-line parameter
+ * in this sample fixes this issue.
+ */
+
+enum MainCycleType {
+ CycleInCpp,
+ CycleInJs
+};
+
+const char* ToCString(const v8::String::Utf8Value& value);
+void ReportException(v8::TryCatch* handler);
+v8::Handle<v8::String> ReadFile(const char* name);
+v8::Handle<v8::String> ReadLine();
+
+v8::Handle<v8::Value> Print(const v8::Arguments& args);
+v8::Handle<v8::Value> ReadLine(const v8::Arguments& args);
+bool RunCppCycle(v8::Handle<v8::Script> script, v8::Local<v8::Context> context,
+ bool report_exceptions);
+
+v8::Persistent<v8::Context> debug_message_context;
+
+
+void DispatchDebugMessages() {
+ // We are in some random thread. We should already have v8::Locker acquired
+ // (we requested this when registering this callback). We were called
+ // because new debug messages arrived; they may have already been processed,
+ // but we shouldn't worry about this.
+ //
+ // All we have to do is set the context and call ProcessDebugMessages.
+ //
+ // We should decide which V8 context to use here. This is important for
+ // the "evaluate" command, because it must be executed in some context.
+ // In our sample we have only one context, so there is nothing really to
+ // think about.
+ v8::Context::Scope scope(debug_message_context);
+
+ v8::Debug::ProcessDebugMessages();
+}
+
+
+int RunMain(int argc, char* argv[]) {
+ v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+ v8::HandleScope handle_scope;
+
+ v8::Handle<v8::String> script_source(NULL);
+ v8::Handle<v8::Value> script_name(NULL);
+ int script_param_counter = 0;
+
+ int port_number = -1;
+ bool wait_for_connection = false;
+ bool support_callback = false;
+ MainCycleType cycle_type = CycleInCpp;
+
+ for (int i = 1; i < argc; i++) {
+ const char* str = argv[i];
+ if (strcmp(str, "-f") == 0) {
+ // Ignore any -f flags for compatibility with the other stand-
+ // alone JavaScript engines.
+ continue;
+ } else if (strcmp(str, "--callback") == 0) {
+ support_callback = true;
+ } else if (strcmp(str, "--wait-for-connection") == 0) {
+ wait_for_connection = true;
+ } else if (strcmp(str, "--main-cycle-in-cpp") == 0) {
+ cycle_type = CycleInCpp;
+ } else if (strcmp(str, "--main-cycle-in-js") == 0) {
+ cycle_type = CycleInJs;
+ } else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
+ port_number = atoi(argv[i + 1]);
+ i++;
+ } else if (strncmp(str, "--", 2) == 0) {
+ printf("Warning: unknown flag %s.\nTry --help for options\n", str);
+ } else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
+ script_source = v8::String::New(argv[i + 1]);
+ script_name = v8::String::New("unnamed");
+ i++;
+ script_param_counter++;
+ } else {
+ // Use argument as a name of file to load.
+ script_source = ReadFile(str);
+ script_name = v8::String::New(str);
+ if (script_source.IsEmpty()) {
+ printf("Error reading '%s'\n", str);
+ return 1;
+ }
+ script_param_counter++;
+ }
+ }
+
+ if (script_param_counter == 0) {
+ printf("Script is not specified\n");
+ return 1;
+ }
+ if (script_param_counter != 1) {
+ printf("Only one script may be specified\n");
+ return 1;
+ }
+
+ // Create a template for the global object.
+ v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
+
+ // Bind the global 'print' function to the C++ Print callback.
+ global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
+
+ if (cycle_type == CycleInJs) {
+ // Bind the global 'read_line' function to the C++ ReadLine callback.
+ global->Set(v8::String::New("read_line"),
+ v8::FunctionTemplate::New(ReadLine));
+ }
+
+ // Create a new execution environment containing the built-in
+ // functions
+ v8::Handle<v8::Context> context = v8::Context::New(NULL, global);
+ debug_message_context = v8::Persistent<v8::Context>::New(context);
+
+
+ // Enter the newly created execution environment.
+ v8::Context::Scope context_scope(context);
+
+ v8::Locker locker;
+
+ if (support_callback) {
+ v8::Debug::SetDebugMessageDispatchHandler(DispatchDebugMessages, true);
+ }
+
+ if (port_number != -1) {
+ const char* auto_break_param = "--debugger_auto_break";
+ v8::V8::SetFlagsFromString(auto_break_param, strlen(auto_break_param));
+ v8::Debug::EnableAgent("lineprocessor", port_number, wait_for_connection);
+ }
+
+ bool report_exceptions = true;
+
+ v8::Handle<v8::Script> script;
+ {
+ // Compile script in try/catch context.
+ v8::TryCatch try_catch;
+ script = v8::Script::Compile(script_source, script_name);
+ if (script.IsEmpty()) {
+ // Print errors that happened during compilation.
+ if (report_exceptions)
+ ReportException(&try_catch);
+ return 1;
+ }
+ }
+
+ {
+ v8::TryCatch try_catch;
+
+ script->Run();
+ if (try_catch.HasCaught()) {
+ if (report_exceptions)
+ ReportException(&try_catch);
+ return 1;
+ }
+ }
+
+ if (cycle_type == CycleInCpp) {
+ bool res = RunCppCycle(script, v8::Context::GetCurrent(),
+ report_exceptions);
+ return !res;
+ } else {
+ // All is already done.
+ }
+ return 0;
+}
+
+
+bool RunCppCycle(v8::Handle<v8::Script> script, v8::Local<v8::Context> context,
+ bool report_exceptions) {
+ v8::Locker lock;
+
+ v8::Handle<v8::String> fun_name = v8::String::New("ProcessLine");
+ v8::Handle<v8::Value> process_val =
+ v8::Context::GetCurrent()->Global()->Get(fun_name);
+
+ // If there is no ProcessLine function, or if it is not a function,
+ // bail out.
+ if (!process_val->IsFunction()) {
+ printf("Error: Script does not declare 'ProcessLine' global function.\n");
+ return false;
+ }
+
+ // It is a function; cast it to a Function
+ v8::Handle<v8::Function> process_fun =
+ v8::Handle<v8::Function>::Cast(process_val);
+
+
+ while (!feof(stdin)) {
+ v8::HandleScope handle_scope;
+
+ v8::Handle<v8::String> input_line = ReadLine();
+ if (input_line == v8::Undefined()) {
+ continue;
+ }
+
+ const int argc = 1;
+ v8::Handle<v8::Value> argv[argc] = { input_line };
+
+ v8::Handle<v8::Value> result;
+ {
+ v8::TryCatch try_catch;
+ result = process_fun->Call(v8::Context::GetCurrent()->Global(),
+ argc, argv);
+ if (try_catch.HasCaught()) {
+ if (report_exceptions)
+ ReportException(&try_catch);
+ return false;
+ }
+ }
+ v8::String::Utf8Value str(result);
+ const char* cstr = ToCString(str);
+ printf("%s\n", cstr);
+ }
+
+ return true;
+}
+
+int main(int argc, char* argv[]) {
+ int result = RunMain(argc, argv);
+ v8::V8::Dispose();
+ return result;
+}
+
+
+// Extracts a C string from a V8 Utf8Value.
+const char* ToCString(const v8::String::Utf8Value& value) {
+ return *value ? *value : "<string conversion failed>";
+}
+
+
+// Reads a file into a v8 string.
+v8::Handle<v8::String> ReadFile(const char* name) {
+ FILE* file = fopen(name, "rb");
+ if (file == NULL) return v8::Handle<v8::String>();
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+ rewind(file);
+
+ char* chars = new char[size + 1];
+ chars[size] = '\0';
+ for (int i = 0; i < size;) {
+ int read = fread(&chars[i], 1, size - i, file);
+ i += read;
+ }
+ fclose(file);
+ v8::Handle<v8::String> result = v8::String::New(chars, size);
+ delete[] chars;
+ return result;
+}
+
+
+void ReportException(v8::TryCatch* try_catch) {
+ v8::HandleScope handle_scope;
+ v8::String::Utf8Value exception(try_catch->Exception());
+ const char* exception_string = ToCString(exception);
+ v8::Handle<v8::Message> message = try_catch->Message();
+ if (message.IsEmpty()) {
+ // V8 didn't provide any extra information about this error; just
+ // print the exception.
+ printf("%s\n", exception_string);
+ } else {
+ // Print (filename):(line number): (message).
+ v8::String::Utf8Value filename(message->GetScriptResourceName());
+ const char* filename_string = ToCString(filename);
+ int linenum = message->GetLineNumber();
+ printf("%s:%i: %s\n", filename_string, linenum, exception_string);
+ // Print line of source code.
+ v8::String::Utf8Value sourceline(message->GetSourceLine());
+ const char* sourceline_string = ToCString(sourceline);
+ printf("%s\n", sourceline_string);
+ // Print wavy underline (GetUnderline is deprecated).
+ int start = message->GetStartColumn();
+ for (int i = 0; i < start; i++) {
+ printf(" ");
+ }
+ int end = message->GetEndColumn();
+ for (int i = start; i < end; i++) {
+ printf("^");
+ }
+ printf("\n");
+ }
+}
+
+
+// The callback that is invoked by v8 whenever the JavaScript 'print'
+// function is called. Prints its arguments on stdout separated by
+// spaces and ending with a newline.
+v8::Handle<v8::Value> Print(const v8::Arguments& args) {
+ bool first = true;
+ for (int i = 0; i < args.Length(); i++) {
+ v8::HandleScope handle_scope;
+ if (first) {
+ first = false;
+ } else {
+ printf(" ");
+ }
+ v8::String::Utf8Value str(args[i]);
+ const char* cstr = ToCString(str);
+ printf("%s", cstr);
+ }
+ printf("\n");
+ fflush(stdout);
+ return v8::Undefined();
+}
+
+
+// The callback that is invoked by v8 whenever the JavaScript 'read_line'
+// function is called. Reads a string from standard input and returns it.
+v8::Handle<v8::Value> ReadLine(const v8::Arguments& args) {
+ if (args.Length() > 0) {
+ return v8::ThrowException(v8::String::New("Unexpected arguments"));
+ }
+ return ReadLine();
+}
+
+v8::Handle<v8::String> ReadLine() {
+ const int kBufferSize = 1024 + 1;
+ char buffer[kBufferSize];
+
+ char* res;
+ {
+ v8::Unlocker unlocker;
+ res = fgets(buffer, kBufferSize, stdin);
+ }
+ if (res == NULL) {
+ v8::Handle<v8::Primitive> t = v8::Undefined();
+ return reinterpret_cast<v8::Handle<v8::String>&>(t);
+ }
+ // remove newline char
+ for (char* pos = buffer; *pos != '\0'; pos++) {
+ if (*pos == '\n') {
+ *pos = '\0';
+ break;
+ }
+ }
+ return v8::String::New(buffer);
+}
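
Once built, an illustrative way to exercise the sample (the port and script name are placeholders):

  lineprocessor --main-cycle-in-cpp --callback -p 5858 process.js

This starts the debugger agent on port 5858; adding --wait-for-connection makes V8 suspend on the first statement until a debugger attaches, as described in the header comment above.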
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index 9f8e4190b..ebda77ac2 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -50,6 +50,7 @@ SOURCES = {
contexts.cc
conversions.cc
counters.cc
+ data-flow.cc
dateparser.cc
debug-agent.cc
debug.cc
@@ -60,6 +61,7 @@ SOURCES = {
flags.cc
frame-element.cc
frames.cc
+ full-codegen.cc
func-name-inferrer.cc
global-handles.cc
handles.cc
@@ -114,6 +116,7 @@ SOURCES = {
arm/disasm-arm.cc
arm/fast-codegen-arm.cc
arm/frames-arm.cc
+ arm/full-codegen-arm.cc
arm/ic-arm.cc
arm/jump-target-arm.cc
arm/macro-assembler-arm.cc
@@ -137,6 +140,7 @@ SOURCES = {
ia32/disasm-ia32.cc
ia32/fast-codegen-ia32.cc
ia32/frames-ia32.cc
+ ia32/full-codegen-ia32.cc
ia32/ic-ia32.cc
ia32/jump-target-ia32.cc
ia32/macro-assembler-ia32.cc
@@ -154,6 +158,7 @@ SOURCES = {
x64/disasm-x64.cc
x64/fast-codegen-x64.cc
x64/frames-x64.cc
+ x64/full-codegen-x64.cc
x64/ic-x64.cc
x64/jump-target-x64.cc
x64/macro-assembler-x64.cc
@@ -239,7 +244,7 @@ def ConfigureObjectFiles():
env.Replace(**context.flags['v8'])
context.ApplyEnvOverrides(env)
env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
- env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE"')
+ env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions')
# Build the standard platform-independent source files.
source_files = context.GetRelevantSources(SOURCES)
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 56cf13598..5a029285e 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -493,11 +493,11 @@ Object* Accessors::FunctionGetLength(Object* object, void*) {
// If the function isn't compiled yet, the length is not computed
// correctly yet. Compile it now and return the right length.
HandleScope scope;
- Handle<JSFunction> function_handle(function);
- if (!CompileLazy(function_handle, KEEP_EXCEPTION)) {
+ Handle<SharedFunctionInfo> shared(function->shared());
+ if (!CompileLazyShared(shared, KEEP_EXCEPTION)) {
return Failure::Exception();
}
- return Smi::FromInt(function_handle->shared()->length());
+ return Smi::FromInt(shared->length());
} else {
return Smi::FromInt(function->shared()->length());
}
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index ab5d0a560..322c90fc5 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -3669,7 +3669,6 @@ void Debug::SetMessageHandler(v8::Debug::MessageHandler handler,
void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
EnsureInitialized("v8::Debug::SetMessageHandler");
ENTER_V8;
- HandleScope scope;
i::Debugger::SetMessageHandler(handler);
}
@@ -3691,10 +3690,10 @@ void Debug::SetHostDispatchHandler(HostDispatchHandler handler,
void Debug::SetDebugMessageDispatchHandler(
- DebugMessageDispatchHandler handler) {
+ DebugMessageDispatchHandler handler, bool provide_locker) {
EnsureInitialized("v8::Debug::SetDebugMessageDispatchHandler");
ENTER_V8;
- i::Debugger::SetDebugMessageDispatchHandler(handler);
+ i::Debugger::SetDebugMessageDispatchHandler(handler, provide_locker);
}
@@ -3744,6 +3743,11 @@ Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) {
return i::Debugger::StartAgent(name, port, wait_for_connection);
}
+
+void Debug::ProcessDebugMessages() {
+ i::Execution::ProcessDebugMesssages(true);
+}
+
#endif // ENABLE_DEBUGGER_SUPPORT
namespace internal {
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index fd2fcd305..354436cb1 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -174,20 +174,6 @@ Operand::Operand(const ExternalReference& f) {
}
-Operand::Operand(Object** opp) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(opp);
- rmode_ = RelocInfo::NONE;
-}
-
-
-Operand::Operand(Context** cpp) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(cpp);
- rmode_ = RelocInfo::NONE;
-}
-
-
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 07da80090..74547be6e 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -30,9 +30,9 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -1371,6 +1371,36 @@ void Assembler::stc2(Coprocessor coproc,
// Support for VFP.
+void Assembler::vldr(const DwVfpRegister dst,
+ const Register base,
+ int offset,
+ const Condition cond) {
+ // Ddst = MEM(Rbase + offset).
+ // Instruction details available in ARM DDI 0406A, A8-628.
+ // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
+ // Vdst(15-12) | 1011(11-8) | offset
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(offset % 4 == 0);
+ emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
+ 0xB*B8 | ((offset / 4) & 255));
+}
+
+
+void Assembler::vstr(const DwVfpRegister src,
+ const Register base,
+ int offset,
+ const Condition cond) {
+ // MEM(Rbase + offset) = Dsrc.
+ // Instruction details available in ARM DDI 0406A, A8-786.
+ // cond(31-28) | 1101(27-24)| 1000(23-20) | | Rbase(19-16) |
+ // Vsrc(15-12) | 1011(11-8) | (offset/4)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(offset % 4 == 0);
+ emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
+ 0xB*B8 | ((offset / 4) & 255));
+}
+
+
void Assembler::vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
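
A hedged sketch of emitting the new VFP load/store pair, assuming the usual ACCESS_MASM-style '__' macro over a MacroAssembler; both instructions assert that VFP3 is enabled and that the offset is a multiple of 4:

  CpuFeatures::Scope scope(VFP3);  // satisfies the IsEnabled(VFP3) asserts
  __ vldr(d0, r0, 0);              // d0 = MEM(r0 + 0)
  __ vstr(d0, r1, 8);              // MEM(r1 + 8) = d0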
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index cd53dd609..208d583ce 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -30,9 +30,9 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
// A light-weight ARM Assembler
// Generates user mode instructions for the ARM architecture up to version 5
@@ -398,8 +398,6 @@ class Operand BASE_EMBEDDED {
RelocInfo::Mode rmode = RelocInfo::NONE));
INLINE(explicit Operand(const ExternalReference& f));
INLINE(explicit Operand(const char* s));
- INLINE(explicit Operand(Object** opp));
- INLINE(explicit Operand(Context** cpp));
explicit Operand(Handle<Object> handle);
INLINE(explicit Operand(Smi* value));
@@ -796,6 +794,14 @@ class Assembler : public Malloced {
// However, some simple modifications can allow
// these APIs to support D16 to D31.
+ void vldr(const DwVfpRegister dst,
+ const Register base,
+ int offset, // Offset must be a multiple of 4.
+ const Condition cond = al);
+ void vstr(const DwVfpRegister src,
+ const Register base,
+ int offset, // Offset must be a multiple of 4.
+ const Condition cond = al);
void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 5389a3c5f..ae7dae3b0 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -38,15 +38,32 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
- // TODO(428): Don't pass the function in a static variable.
- __ mov(ip, Operand(ExternalReference::builtin_passed_function()));
- __ str(r1, MemOperand(ip, 0));
-
- // The actual argument count has already been loaded into register
- // r0, but JumpToRuntime expects r0 to contain the number of
- // arguments including the receiver.
- __ add(r0, r0, Operand(1));
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ CFunctionId id,
+ BuiltinExtraArguments extra_args) {
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments excluding receiver
+ // -- r1 : called function (only guaranteed when
+ // extra_args requires it)
+ // -- cp : context
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[4 * (argc - 1)] : first argument (argc == r0)
+ // -- sp[4 * argc] : receiver
+ // -----------------------------------
+
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ __ push(r1);
+ } else {
+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+ // JumpToRuntime expects r0 to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ add(r0, r0, Operand(num_extra_args + 1));
__ JumpToRuntime(ExternalReference(id));
}
@@ -491,7 +508,8 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
}
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function) {
// Enter a construct frame.
__ EnterConstructFrame();
@@ -727,8 +745,17 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Call the function.
// r0: number of arguments
// r1: constructor function
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION);
+ if (is_api_function) {
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ Handle<Code> code = Handle<Code>(
+ Builtins::builtin(Builtins::HandleApiCallConstruct));
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, CALL_FUNCTION);
+ } else {
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION);
+ }
// Pop the function from the stack.
// sp[0]: constructor function
@@ -783,6 +810,16 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
}
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, true);
+}
+
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 70d8ab495..ea4b165fd 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -47,7 +47,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Condition cc,
bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Label* rhs_not_nan,
+ Label* lhs_not_nan,
Label* slow,
bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
@@ -121,12 +121,13 @@ CodeGenState::~CodeGenState() {
// -------------------------------------------------------------------------
// CodeGenerator implementation
-CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
+CodeGenerator::CodeGenerator(MacroAssembler* masm,
+ Handle<Script> script,
bool is_eval)
: is_eval_(is_eval),
script_(script),
deferred_(8),
- masm_(new MacroAssembler(NULL, buffer_size)),
+ masm_(masm),
scope_(NULL),
frame_(NULL),
allocator_(NULL),
@@ -142,7 +143,9 @@ CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
// r1: called JS function
// cp: callee's context
-void CodeGenerator::GenCode(FunctionLiteral* fun) {
+void CodeGenerator::Generate(FunctionLiteral* fun,
+ Mode mode,
+ CompilationInfo* info) {
// Record the position for debugging purposes.
CodeForFunctionPosition(fun);
@@ -168,8 +171,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
// r1: called JS function
// cp: callee's context
allocator_->Initialize();
- frame_->Enter();
- // tos: code slot
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -178,104 +180,118 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
}
#endif
- // Allocate space for locals and initialize them. This also checks
- // for stack overflow.
- frame_->AllocateStackSlots();
- // Initialize the function return target after the locals are set
- // up, because it needs the expected frame height from the frame.
- function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
- function_return_is_shadowed_ = false;
+ if (mode == PRIMARY) {
+ frame_->Enter();
+ // tos: code slot
- VirtualFrame::SpilledScope spilled_scope;
- int heap_slots = scope_->num_heap_slots();
- if (heap_slots > 0) {
- // Allocate local context.
- // Get outer context and create a new context based on it.
- __ ldr(r0, frame_->Function());
- frame_->EmitPush(r0);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kNewContext, 1);
- }
+ // Allocate space for locals and initialize them. This also checks
+ // for stack overflow.
+ frame_->AllocateStackSlots();
+
+ VirtualFrame::SpilledScope spilled_scope;
+ int heap_slots = scope_->num_heap_slots();
+ if (heap_slots > 0) {
+ // Allocate local context.
+ // Get outer context and create a new context based on it.
+ __ ldr(r0, frame_->Function());
+ frame_->EmitPush(r0);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ frame_->CallStub(&stub, 1);
+ } else {
+ frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
#ifdef DEBUG
- JumpTarget verified_true;
- __ cmp(r0, Operand(cp));
- verified_true.Branch(eq);
- __ stop("NewContext: r0 is expected to be the same as cp");
- verified_true.Bind();
+ JumpTarget verified_true;
+ __ cmp(r0, Operand(cp));
+ verified_true.Branch(eq);
+ __ stop("NewContext: r0 is expected to be the same as cp");
+ verified_true.Bind();
#endif
- // Update context local.
- __ str(cp, frame_->Context());
- }
+ // Update context local.
+ __ str(cp, frame_->Context());
+ }
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
-
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope_->num_parameters(); i++) {
- Variable* par = scope_->parameter(i);
- Slot* slot = par->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- ASSERT(!scope_->is_global_scope()); // no parameters in global scope
- __ ldr(r1, frame_->ParameterAt(i));
- // Loads r2 with context; used below in RecordWrite.
- __ str(r1, SlotOperand(slot, r2));
- // Load the offset into r3.
- int slot_offset =
- FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r3, Operand(slot_offset));
- __ RecordWrite(r2, r3, r1);
+ // TODO(1241774): Improve this code:
+ // 1) only needed if we have a context
+ // 2) no need to recompute context ptr every single time
+ // 3) don't copy parameter operand code from SlotOperand!
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, it must be the last argument
+ // passed to the parameter that needs to be copied. This is a rare
+ // case so we don't check for it, instead we rely on the copying
+ // order: such a parameter is copied repeatedly into the same
+ // context location and thus the last value is what is seen inside
+ // the function.
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ Variable* par = scope_->parameter(i);
+ Slot* slot = par->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ // No parameters in global scope.
+ ASSERT(!scope_->is_global_scope());
+ __ ldr(r1, frame_->ParameterAt(i));
+ // Loads r2 with context; used below in RecordWrite.
+ __ str(r1, SlotOperand(slot, r2));
+ // Load the offset into r3.
+ int slot_offset =
+ FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ mov(r3, Operand(slot_offset));
+ __ RecordWrite(r2, r3, r1);
+ }
}
}
- }
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in the
- // context.
- if (scope_->arguments() != NULL) {
- Comment cmnt(masm_, "[ allocate arguments object");
- ASSERT(scope_->arguments_shadow() != NULL);
- Variable* arguments = scope_->arguments()->var();
- Variable* shadow = scope_->arguments_shadow()->var();
- ASSERT(arguments != NULL && arguments->slot() != NULL);
- ASSERT(shadow != NULL && shadow->slot() != NULL);
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ ldr(r2, frame_->Function());
- // The receiver is below the arguments, the return address, and the
- // frame pointer on the stack.
- const int kReceiverDisplacement = 2 + scope_->num_parameters();
- __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
- __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
- frame_->Adjust(3);
- __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
- frame_->CallStub(&stub, 3);
- frame_->EmitPush(r0);
- StoreToSlot(arguments->slot(), NOT_CONST_INIT);
- StoreToSlot(shadow->slot(), NOT_CONST_INIT);
- frame_->Drop(); // Value is no longer needed.
- }
+ // Store the arguments object. This must happen after context
+ // initialization because the arguments object may be stored in the
+ // context.
+ if (scope_->arguments() != NULL) {
+ Comment cmnt(masm_, "[ allocate arguments object");
+ ASSERT(scope_->arguments_shadow() != NULL);
+ Variable* arguments = scope_->arguments()->var();
+ Variable* shadow = scope_->arguments_shadow()->var();
+ ASSERT(arguments != NULL && arguments->slot() != NULL);
+ ASSERT(shadow != NULL && shadow->slot() != NULL);
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ ldr(r2, frame_->Function());
+ // The receiver is below the arguments, the return address, and the
+ // frame pointer on the stack.
+ const int kReceiverDisplacement = 2 + scope_->num_parameters();
+ __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
+ __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+ frame_->Adjust(3);
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
+ frame_->CallStub(&stub, 3);
+ frame_->EmitPush(r0);
+ StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+ StoreToSlot(shadow->slot(), NOT_CONST_INIT);
+ frame_->Drop(); // Value is no longer needed.
+ }
- // Initialize ThisFunction reference if present.
- if (scope_->is_function_scope() && scope_->function() != NULL) {
- __ mov(ip, Operand(Factory::the_hole_value()));
- frame_->EmitPush(ip);
- StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ // Initialize ThisFunction reference if present.
+ if (scope_->is_function_scope() && scope_->function() != NULL) {
+ __ mov(ip, Operand(Factory::the_hole_value()));
+ frame_->EmitPush(ip);
+ StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ }
+ } else {
+ // When used as the secondary compiler for splitting, r1, cp,
+ // fp, and lr have been pushed on the stack. Adjust the virtual
+ // frame to match this state.
+ frame_->Adjust(4);
+ allocator_->Unuse(r1);
+ allocator_->Unuse(lr);
}
+ // Initialize the function return target after the locals are set
+ // up, because it needs the expected frame height from the frame.
+ function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+ function_return_is_shadowed_ = false;
+
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
@@ -605,14 +621,19 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
}
-Reference::Reference(CodeGenerator* cgen, Expression* expression)
- : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+Reference::Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get)
+ : cgen_(cgen),
+ expression_(expression),
+ type_(ILLEGAL),
+ persist_after_get_(persist_after_get) {
cgen->LoadReference(this);
}
Reference::~Reference() {
- cgen_->UnloadReference(this);
+ ASSERT(is_unloaded() || is_illegal());
}
@@ -661,6 +682,7 @@ void CodeGenerator::UnloadReference(Reference* ref) {
frame_->Drop(size);
frame_->EmitPush(r0);
}
+ ref->set_unloaded();
}
@@ -1091,7 +1113,8 @@ void CodeGenerator::Comparison(Condition cc,
// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- int position) {
+ CallFunctionFlags flags,
+ int position) {
VirtualFrame::SpilledScope spilled_scope;
// Push the arguments ("left-to-right") on the stack.
int arg_count = args->length();
@@ -1104,7 +1127,7 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
// Use the shared code stub to call the function.
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
+ CallFunctionStub call_function(arg_count, in_loop, flags);
frame_->CallStub(&call_function, arg_count + 1);
// Restore context and pop function from the stack.
@@ -1243,8 +1266,6 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
Reference target(this, node->proxy());
LoadAndSpill(val);
target.SetValue(NOT_CONST_INIT);
- // The reference is removed from the stack (preserving TOS) when
- // it goes out of scope.
}
// Get rid of the assigned value (declarations are statements).
frame_->Drop();
@@ -1931,25 +1952,17 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
if (each.size() > 0) {
__ ldr(r0, frame_->ElementAt(each.size()));
frame_->EmitPush(r0);
- }
- // If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (eg, r3 pushed above) is
- // right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT);
- if (each.size() > 0) {
- // It's safe to pop the value lying on top of the reference before
- // unloading the reference itself (which preserves the top of stack,
- // ie, now the topmost value of the non-zero sized reference), since
- // we will discard the top of stack after unloading the reference
- // anyway.
- frame_->EmitPop(r0);
+ each.SetValue(NOT_CONST_INIT);
+ frame_->Drop(2);
+ } else {
+ // If the reference was to a slot we rely on the convenient property
+ // that it doesn't matter whether a value (eg, r3 pushed above) is
+ // right on top of or right underneath a zero-sized reference.
+ each.SetValue(NOT_CONST_INIT);
+ frame_->Drop();
}
}
}
- // Discard the i'th entry pushed above or else the remainder of the
- // reference, whichever is currently on top of the stack.
- frame_->Drop();
-
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
@@ -2289,7 +2302,8 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
Comment cmnt(masm_, "[ DebuggerStatament");
CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
- frame_->CallRuntime(Runtime::kDebugBreak, 0);
+ DebuggerStatementStub ces;
+ frame_->CallStub(&ces, 0);
#endif
// Ignore the return value.
ASSERT(frame_->height() == original_height);
@@ -2592,13 +2606,12 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
// Load the global object.
LoadGlobal();
// Setup the name register.
- Result name(r2);
__ mov(r2, Operand(slot->var()->name()));
// Call IC stub.
if (typeof_state == INSIDE_TYPEOF) {
- frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, &name, 0);
+ frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
} else {
- frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, &name, 0);
+ frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, 0);
}
// Drop the global object. The result is in r0.
@@ -2843,7 +2856,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Assignment");
- { Reference target(this, node->target());
+ { Reference target(this, node->target(), node->is_compound());
if (target.is_illegal()) {
// Fool the virtual frame into thinking that we left the assignment's
// value on the frame.
@@ -2858,8 +2871,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
node->op() == Token::INIT_CONST) {
LoadAndSpill(node->value());
- } else {
- // +=, *= and similar binary assignments.
+ } else { // Assignment is a compound assignment.
// Get the old value of the lhs.
target.GetValueAndSpill();
Literal* literal = node->value()->AsLiteral();
@@ -2880,13 +2892,12 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
frame_->EmitPush(r0);
}
}
-
Variable* var = node->target()->AsVariableProxy()->AsVariable();
if (var != NULL &&
(var->mode() == Variable::CONST) &&
node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
// Assignment ignored - leave the value on the stack.
-
+ UnloadReference(&target);
} else {
CodeForSourcePosition(node->position());
if (node->op() == Token::INIT_CONST) {
@@ -2999,7 +3010,7 @@ void CodeGenerator::VisitCall(Call* node) {
CodeForSourcePosition(node->position());
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
+ CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
frame_->CallStub(&call_function, arg_count + 1);
__ ldr(cp, frame_->Context());
@@ -3056,7 +3067,7 @@ void CodeGenerator::VisitCall(Call* node) {
frame_->EmitPush(r1); // receiver
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
frame_->EmitPush(r0);
} else if (property != NULL) {
@@ -3096,20 +3107,24 @@ void CodeGenerator::VisitCall(Call* node) {
// JavaScript example: 'array[index](1, 2, 3)'
// -------------------------------------------
- // Load the function to call from the property through a reference.
- Reference ref(this, property);
- ref.GetValueAndSpill(); // receiver
-
- // Pass receiver to called function.
+ LoadAndSpill(property->obj());
+ LoadAndSpill(property->key());
+ EmitKeyedLoad(false);
+ frame_->Drop(); // key
+ // Put the function below the receiver.
if (property->is_synthetic()) {
+ // Use the global receiver.
+ frame_->Drop();
+ frame_->EmitPush(r0);
LoadGlobalReceiver(r0);
} else {
- __ ldr(r0, frame_->ElementAt(ref.size()));
- frame_->EmitPush(r0);
+ frame_->EmitPop(r1); // receiver
+ frame_->EmitPush(r0); // function
+ frame_->EmitPush(r1); // receiver
}
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
frame_->EmitPush(r0);
}
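
For the non-synthetic case above, the pop/push sequence is just a two-slot stack shuffle that moves the loaded function below the receiver. A minimal host-side model of it (illustrative only; the comments mirror the virtual-frame calls in the hunk):

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> frame = {"receiver", "key"};  // As left by the two loads.
      std::string r0 = "function";               // Result of the keyed load IC.
      frame.pop_back();                          // frame_->Drop();        // key
      std::string r1 = frame.back();             // frame_->EmitPop(r1);   // receiver
      frame.pop_back();
      frame.push_back(r0);                       // frame_->EmitPush(r0);  // function
      frame.push_back(r1);                       // frame_->EmitPush(r1);  // receiver
      std::printf("[%s, %s]\n", frame[0].c_str(), frame[1].c_str());
      // Prints [function, receiver]: the layout CallWithArguments expects.
    }
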
@@ -3125,7 +3140,7 @@ void CodeGenerator::VisitCall(Call* node) {
LoadGlobalReceiver(r0);
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
frame_->EmitPush(r0);
}
ASSERT(frame_->height() == original_height + 1);
@@ -3159,22 +3174,15 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
}
// r0: the number of arguments.
- Result num_args(r0);
__ mov(r0, Operand(arg_count));
-
// Load the function into r1 as per calling convention.
- Result function(r1);
__ ldr(r1, frame_->ElementAt(arg_count + 1));
// Call the construct call builtin that handles allocation and
// constructor invocation.
CodeForSourcePosition(node->position());
Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
- frame_->CallCodeObject(ic,
- RelocInfo::CONSTRUCT_CALL,
- &num_args,
- &function,
- arg_count + 1);
+ frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
// Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
__ str(r0, frame_->Top());
@@ -3469,6 +3477,20 @@ void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r0);
+ __ tst(r0, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
+ __ tst(r1, Operand(1 << Map::kIsUndetectable));
+ cc_reg_ = ne;
+}
+
+
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 0);
@@ -3561,7 +3583,8 @@ void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
Load(args->at(0));
Load(args->at(1));
- frame_->CallRuntime(Runtime::kStringCompare, 2);
+ StringCompareStub stub;
+ frame_->CallStub(&stub, 2);
frame_->EmitPush(r0);
}
@@ -3709,6 +3732,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0); // r0 has result
} else {
+ bool overwrite =
+ (node->expression()->AsBinaryOperation() != NULL &&
+ node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
LoadAndSpill(node->expression());
frame_->EmitPop(r0);
switch (op) {
@@ -3719,9 +3745,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break;
case Token::SUB: {
- bool overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
GenericUnaryOpStub stub(Token::SUB, overwrite);
frame_->CallStub(&stub, 0);
break;
@@ -3734,10 +3757,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
__ tst(r0, Operand(kSmiTagMask));
smi_label.Branch(eq);
- frame_->EmitPush(r0);
- frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, 1);
-
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ frame_->CallStub(&stub, 0);
continue_label.Jump();
+
smi_label.Bind();
__ mvn(r0, Operand(r0));
__ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
@@ -3791,7 +3814,9 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
frame_->EmitPush(r0);
}
- { Reference target(this, node->expression());
+  // A constant reference is never stored to, so it is not treated as a
+  // compound assignment reference.
+ { Reference target(this, node->expression(), !is_const);
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
@@ -4252,6 +4277,16 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
+void CodeGenerator::EmitKeyedLoad(bool is_global) {
+ Comment cmnt(masm_, "[ Load from keyed Property");
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ RelocInfo::Mode rmode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ frame_->CallCodeObject(ic, rmode, 0);
+}
+
+
#ifdef DEBUG
bool CodeGenerator::HasValidEntryRegisters() { return true; }
#endif
@@ -4304,13 +4339,12 @@ void Reference::GetValue() {
Variable* var = expression_->AsVariableProxy()->AsVariable();
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Setup the name register.
- Result name_reg(r2);
__ mov(r2, Operand(name));
ASSERT(var == NULL || var->is_global());
RelocInfo::Mode rmode = (var == NULL)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- frame->CallCodeObject(ic, rmode, &name_reg, 0);
+ frame->CallCodeObject(ic, rmode, 0);
frame->EmitPush(r0);
break;
}
@@ -4318,23 +4352,21 @@ void Reference::GetValue() {
case KEYED: {
// TODO(181): Implement inlined version of array indexing once
// loop nesting is properly tracked on ARM.
- VirtualFrame* frame = cgen_->frame();
- Comment cmnt(masm, "[ Load from keyed Property");
ASSERT(property != NULL);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
Variable* var = expression_->AsVariableProxy()->AsVariable();
ASSERT(var == NULL || var->is_global());
- RelocInfo::Mode rmode = (var == NULL)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- frame->CallCodeObject(ic, rmode, 0);
- frame->EmitPush(r0);
+ cgen_->EmitKeyedLoad(var != NULL);
+ cgen_->frame()->EmitPush(r0);
break;
}
default:
UNREACHABLE();
}
+
+ if (!persist_after_get_) {
+ cgen_->UnloadReference(this);
+ }
}
@@ -4353,6 +4385,7 @@ void Reference::SetValue(InitState init_state) {
Comment cmnt(masm, "[ Store to Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
cgen_->StoreToSlot(slot, init_state);
+ cgen_->UnloadReference(this);
break;
}
@@ -4362,18 +4395,12 @@ void Reference::SetValue(InitState init_state) {
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Handle<String> name(GetName());
- Result value(r0);
frame->EmitPop(r0);
-
// Setup the name register.
- Result property_name(r2);
__ mov(r2, Operand(name));
- frame->CallCodeObject(ic,
- RelocInfo::CODE_TARGET,
- &value,
- &property_name,
- 0);
+ frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
frame->EmitPush(r0);
+ cgen_->UnloadReference(this);
break;
}
@@ -4386,10 +4413,10 @@ void Reference::SetValue(InitState init_state) {
// Call IC code.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
// TODO(1222589): Make the IC grab the values from the stack.
- Result value(r0);
frame->EmitPop(r0); // value
- frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, &value, 0);
+ frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
frame->EmitPush(r0);
+ cgen_->UnloadReference(this);
break;
}
@@ -4777,7 +4804,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
} else if (cc == gt) {
__ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
} else {
- __ mov(r0, Operand(0)); // Things are <=, >=, ==, === themselves.
+ __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
}
__ mov(pc, Operand(lr)); // Return.
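
The stubs in this file encode comparison results as the integers LESS, EQUAL, and GREATER. A sketch of the convention applied to identical operands, assuming the usual -1/0/1 values for those constants (the NaN case is handled separately below):

    #include <cstdio>

    enum { LESS = -1, EQUAL = 0, GREATER = 1 };  // Assumed values.

    // What EmitIdenticalObjectComparison computes for x compared with itself:
    // identical values satisfy <=, >=, == and ===, so a "failing" value is
    // returned only for the strict orderings.
    static int IdenticalResult(char cc) {
      if (cc == '<') return GREATER;  // x < x is false: report greater.
      if (cc == '>') return LESS;     // x > x is false: report less.
      return EQUAL;                   // <=, >=, ==, === all hold for x vs. x.
    }

    int main() {
      std::printf("%d %d %d\n", IdenticalResult('<'), IdenticalResult('>'),
                  IdenticalResult('='));  // 1 -1 0
    }
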
@@ -4789,6 +4816,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
__ bind(&heap_number);
// It is a heap number, so return non-equal if it's NaN and equal if it's
// not NaN.
+
// The representation of NaN values has all exponent bits (52..62) set,
// and not all mantissa bits (0..51) clear.
// Read top bits of double representation (second word of value).
@@ -4804,9 +4832,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
__ orr(r0, r3, Operand(r2), SetCC);
// For equal we already have the right value in r0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if not
- // (it's a NaN). For <= and >= we need to load r0 with the failing value
- // if it's a NaN.
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load r0 with the failing
+ // value if it's a NaN.
if (cc != eq) {
// All-zero means Infinity means equal.
__ mov(pc, Operand(lr), LeaveCC, eq); // Return equal
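
The exponent/mantissa test described in the comments above can be checked on any host. A self-contained sketch of the same bit-level NaN test (helper names are illustrative, not V8's):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <limits>

    static bool IsNaNBits(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // Raw IEEE-754 bit pattern.
      const uint64_t kExponentMask = 0x7FF0000000000000ULL;  // bits 52..62
      const uint64_t kMantissaMask = 0x000FFFFFFFFFFFFFULL;  // bits 0..51
      return (bits & kExponentMask) == kExponentMask &&  // all exponent bits set
             (bits & kMantissaMask) != 0;                // mantissa not all clear
    }

    int main() {
      std::printf("%d %d %d\n",
                  IsNaNBits(std::numeric_limits<double>::quiet_NaN()),  // 1
                  IsNaNBits(std::numeric_limits<double>::infinity()),   // 0
                  IsNaNBits(1.5));                                      // 0
    }
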
@@ -4827,17 +4855,17 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Label* rhs_not_nan,
+ Label* lhs_not_nan,
Label* slow,
bool strict) {
- Label lhs_is_smi;
+ Label rhs_is_smi;
__ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &lhs_is_smi);
+ __ b(eq, &rhs_is_smi);
- // Rhs is a Smi. Check whether the non-smi is a heap number.
+ // Lhs is a Smi. Check whether the rhs is a heap number.
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
if (strict) {
- // If lhs was not a number and rhs was a Smi then strict equality cannot
+ // If rhs is not a number and lhs is a Smi then strict equality cannot
// succeed. Return non-equal (r0 is already not zero)
__ mov(pc, Operand(lr), LeaveCC, ne); // Return.
} else {
@@ -4846,92 +4874,104 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ b(ne, slow);
}
- // Rhs is a smi, lhs is a number.
- __ push(lr);
-
+ // Lhs (r1) is a smi, rhs (r0) is a number.
if (CpuFeatures::IsSupported(VFP3)) {
+    // Convert lhs to a double in d7.
CpuFeatures::Scope scope(VFP3);
- __ IntegerToDoubleConversionWithVFP3(r1, r3, r2);
+ __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+ __ vmov(s15, r7);
+ __ vcvt(d7, s15);
+ // Load the double from rhs, tagged HeapNumber r0, to d6.
+ __ sub(r7, r0, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
} else {
+ __ push(lr);
+ // Convert lhs to a double in r2, r3.
__ mov(r7, Operand(r1));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Load rhs to a double in r0, r1.
+ __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ pop(lr);
}
-
- // r3 and r2 are rhs as double.
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
- __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
// We now have both loaded as doubles but we can skip the lhs nan check
- // since it's a Smi.
- __ pop(lr);
- __ jmp(rhs_not_nan);
+ // since it's a smi.
+ __ jmp(lhs_not_nan);
- __ bind(&lhs_is_smi);
- // Lhs is a Smi. Check whether the non-smi is a heap number.
+ __ bind(&rhs_is_smi);
+ // Rhs is a smi. Check whether the non-smi lhs is a heap number.
__ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
if (strict) {
- // If lhs was not a number and rhs was a Smi then strict equality cannot
+ // If lhs is not a number and rhs is a smi then strict equality cannot
// succeed. Return non-equal.
__ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal.
__ mov(pc, Operand(lr), LeaveCC, ne); // Return.
} else {
- // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // Smi compared non-strictly with a non-smi non-heap-number. Call
// the runtime.
__ b(ne, slow);
}
- // Lhs is a smi, rhs is a number.
- // r0 is Smi and r1 is heap number.
- __ push(lr);
- __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
- __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
-
+ // Rhs (r0) is a smi, lhs (r1) is a heap number.
if (CpuFeatures::IsSupported(VFP3)) {
+    // Convert rhs to a double in d6.
CpuFeatures::Scope scope(VFP3);
- __ IntegerToDoubleConversionWithVFP3(r0, r1, r0);
+ // Load the double from lhs, tagged HeapNumber r1, to d7.
+ __ sub(r7, r1, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
+ __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+ __ vmov(s13, r7);
+ __ vcvt(d6, s13);
} else {
+ __ push(lr);
+ // Load lhs to a double in r2, r3.
+ __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ // Convert rhs to a double in r0, r1.
__ mov(r7, Operand(r0));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
}
-
- __ pop(lr);
// Fall through to both_loaded_as_doubles.
}
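
Both VFP3 conversion sequences above untag the smi with an arithmetic shift and then convert it to a double. A host-side sketch, assuming the 32-bit tagging scheme where a smi is its value shifted left by kSmiTagSize (one bit, tag zero):

    #include <cstdint>
    #include <cstdio>

    const int kSmiTagSize = 1;  // Assumed: 31-bit payload, tag bit zero.

    static double SmiToDouble(int32_t tagged) {
      int32_t untagged = tagged >> kSmiTagSize;  // mov r7, Operand(rN, ASR, kSmiTagSize)
      return static_cast<double>(untagged);      // vmov sM, r7 ; vcvt dN, sM
    }

    int main() {
      int32_t smi = -7 * 2;  // Tagged representation of -7 (value << kSmiTagSize).
      std::printf("%.1f\n", SmiToDouble(smi));  // -7.0
    }
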
-void EmitNanCheck(MacroAssembler* masm, Label* rhs_not_nan, Condition cc) {
+void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register lhs_exponent = exp_first ? r0 : r1;
- Register rhs_exponent = exp_first ? r2 : r3;
- Register lhs_mantissa = exp_first ? r1 : r0;
- Register rhs_mantissa = exp_first ? r3 : r2;
+ Register rhs_exponent = exp_first ? r0 : r1;
+ Register lhs_exponent = exp_first ? r2 : r3;
+ Register rhs_mantissa = exp_first ? r1 : r0;
+ Register lhs_mantissa = exp_first ? r3 : r2;
Label one_is_nan, neither_is_nan;
+ Label lhs_not_nan_exp_mask_is_loaded;
Register exp_mask_reg = r5;
__ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
- __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
+ __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
__ cmp(r4, Operand(exp_mask_reg));
- __ b(ne, rhs_not_nan);
+ __ b(ne, &lhs_not_nan_exp_mask_is_loaded);
__ mov(r4,
- Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
+ Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
SetCC);
__ b(ne, &one_is_nan);
- __ cmp(rhs_mantissa, Operand(0));
+ __ cmp(lhs_mantissa, Operand(0));
__ b(ne, &one_is_nan);
- __ bind(rhs_not_nan);
+ __ bind(lhs_not_nan);
__ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
- __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
+ __ bind(&lhs_not_nan_exp_mask_is_loaded);
+ __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
__ cmp(r4, Operand(exp_mask_reg));
__ b(ne, &neither_is_nan);
__ mov(r4,
- Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
+ Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
SetCC);
__ b(ne, &one_is_nan);
- __ cmp(lhs_mantissa, Operand(0));
+ __ cmp(rhs_mantissa, Operand(0));
__ b(eq, &neither_is_nan);
__ bind(&one_is_nan);
@@ -4951,21 +4991,21 @@ void EmitNanCheck(MacroAssembler* masm, Label* rhs_not_nan, Condition cc) {
// See comment at call site.
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register lhs_exponent = exp_first ? r0 : r1;
- Register rhs_exponent = exp_first ? r2 : r3;
- Register lhs_mantissa = exp_first ? r1 : r0;
- Register rhs_mantissa = exp_first ? r3 : r2;
+ Register rhs_exponent = exp_first ? r0 : r1;
+ Register lhs_exponent = exp_first ? r2 : r3;
+ Register rhs_mantissa = exp_first ? r1 : r0;
+ Register lhs_mantissa = exp_first ? r3 : r2;
// r0, r1, r2, r3 have the two doubles. Neither is a NaN.
if (cc == eq) {
// Doubles are not equal unless they have the same bit pattern.
// Exception: 0 and -0.
- __ cmp(lhs_mantissa, Operand(rhs_mantissa));
- __ orr(r0, lhs_mantissa, Operand(rhs_mantissa), LeaveCC, ne);
+ __ cmp(rhs_mantissa, Operand(lhs_mantissa));
+ __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
// Return non-zero if the numbers are unequal.
__ mov(pc, Operand(lr), LeaveCC, ne);
- __ sub(r0, lhs_exponent, Operand(rhs_exponent), SetCC);
+ __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
// If exponents are equal then return 0.
__ mov(pc, Operand(lr), LeaveCC, eq);
@@ -4975,12 +5015,12 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
// We start by seeing if the mantissas (that are equal) or the bottom
// 31 bits of the rhs exponent are non-zero. If so we return not
// equal.
- __ orr(r4, rhs_mantissa, Operand(rhs_exponent, LSL, kSmiTagSize), SetCC);
+ __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
__ mov(r0, Operand(r4), LeaveCC, ne);
__ mov(pc, Operand(lr), LeaveCC, ne); // Return conditionally.
// Now they are equal if and only if the lhs exponent is zero in its
// low 31 bits.
- __ mov(r0, Operand(lhs_exponent, LSL, kSmiTagSize));
+ __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
__ mov(pc, Operand(lr));
} else {
// Call a native function to do a comparison between two non-NaNs.
@@ -5035,17 +5075,26 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
Label* both_loaded_as_doubles,
Label* not_heap_numbers,
Label* slow) {
- __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
+ __ CompareObjectType(r0, r3, r2, HEAP_NUMBER_TYPE);
__ b(ne, not_heap_numbers);
- __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ cmp(r2, r3);
__ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
- __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
- __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ sub(r7, r0, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
+ __ sub(r7, r1, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
+ } else {
+ __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ }
__ jmp(both_loaded_as_doubles);
}
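
The VFP3 branch first strips the heap-object tag so that vldr's fixed immediate offset lands on the double payload. An illustrative model of that pointer arithmetic (the struct layout here is an assumption, not V8's real HeapNumber):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    const uintptr_t kHeapObjectTag = 1;  // Assumed: heap pointers have the low bit set.

    struct FakeHeapNumber { void* map; double value; };  // Illustrative layout.

    int main() {
      FakeHeapNumber n = { nullptr, 2.5 };
      uintptr_t tagged = reinterpret_cast<uintptr_t>(&n) + kHeapObjectTag;

      // sub r7, rN, Operand(kHeapObjectTag): recover the untagged base address.
      const char* base = reinterpret_cast<const char*>(tagged - kHeapObjectTag);

      // vldr dN, r7, HeapNumber::kValueOffset: load the payload at a fixed offset.
      double loaded;
      std::memcpy(&loaded, base + offsetof(FakeHeapNumber, value), sizeof(loaded));
      std::printf("%.1f\n", loaded);  // 2.5
    }
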
@@ -5070,11 +5119,12 @@ static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
}
-// On entry r0 and r1 are the things to be compared. On exit r0 is 0,
-// positive or negative to indicate the result of the comparison.
+// On entry r0 (rhs) and r1 (lhs) are the values to be compared.
+// On exit r0 is 0, positive or negative to indicate the result of
+// the comparison.
void CompareStub::Generate(MacroAssembler* masm) {
Label slow; // Call builtin.
- Label not_smis, both_loaded_as_doubles, rhs_not_nan;
+ Label not_smis, both_loaded_as_doubles, lhs_not_nan;
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
@@ -5094,32 +5144,44 @@ void CompareStub::Generate(MacroAssembler* masm) {
// 1) Return the answer.
// 2) Go to slow.
// 3) Fall through to both_loaded_as_doubles.
- // 4) Jump to rhs_not_nan.
+ // 4) Jump to lhs_not_nan.
// In cases 3 and 4 we have found out we were dealing with a number-number
- // comparison and the numbers have been loaded into r0, r1, r2, r3 as doubles.
- EmitSmiNonsmiComparison(masm, &rhs_not_nan, &slow, strict_);
+ // comparison. If VFP3 is supported the double values of the numbers have
+ // been loaded into d7 and d6. Otherwise, the double values have been loaded
+ // into r0, r1, r2, and r3.
+ EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);
__ bind(&both_loaded_as_doubles);
- // r0, r1, r2, r3 are the double representations of the left hand side
- // and the right hand side.
-
- // Checks for NaN in the doubles we have loaded. Can return the answer or
- // fall through if neither is a NaN. Also binds rhs_not_nan.
- EmitNanCheck(masm, &rhs_not_nan, cc_);
-
+ // The arguments have been converted to doubles and stored in d6 and d7, if
+ // VFP3 is supported, or in r0, r1, r2, and r3.
if (CpuFeatures::IsSupported(VFP3)) {
+ __ bind(&lhs_not_nan);
CpuFeatures::Scope scope(VFP3);
+ Label no_nan;
// ARMv7 VFP3 instructions to implement double precision comparison.
- __ vmov(d6, r0, r1);
- __ vmov(d7, r2, r3);
-
- __ vcmp(d6, d7);
- __ vmrs(pc);
- __ mov(r0, Operand(0), LeaveCC, eq);
- __ mov(r0, Operand(1), LeaveCC, lt);
- __ mvn(r0, Operand(0), LeaveCC, gt);
+ __ vcmp(d7, d6);
+ __ vmrs(pc); // Move vector status bits to normal status bits.
+ Label nan;
+ __ b(vs, &nan);
+ __ mov(r0, Operand(EQUAL), LeaveCC, eq);
+ __ mov(r0, Operand(LESS), LeaveCC, lt);
+ __ mov(r0, Operand(GREATER), LeaveCC, gt);
+ __ mov(pc, Operand(lr));
+
+ __ bind(&nan);
+ // If one of the sides was a NaN then the v flag is set. Load r0 with
+ // whatever it takes to make the comparison fail, since comparisons with NaN
+ // always fail.
+ if (cc_ == lt || cc_ == le) {
+ __ mov(r0, Operand(GREATER));
+ } else {
+ __ mov(r0, Operand(LESS));
+ }
__ mov(pc, Operand(lr));
} else {
+ // Checks for NaN in the doubles we have loaded. Can return the answer or
+ // fall through if neither is a NaN. Also binds lhs_not_nan.
+ EmitNanCheck(masm, &lhs_not_nan, cc_);
// Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
// answer. Never falls through.
EmitTwoNonNanDoubleComparison(masm, cc_);
@@ -5135,14 +5197,15 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
Label check_for_symbols;
+ Label flat_string_check;
// Check for heap-number-heap-number comparison. Can jump to slow case,
// or load both doubles into r0, r1, r2, r3 and jump to the code that handles
// that case. If the inputs are not doubles then jumps to check_for_symbols.
- // In this case r2 will contain the type of r0.
+ // In this case r2 will contain the type of r0. Never falls through.
EmitCheckForTwoHeapNumbers(masm,
&both_loaded_as_doubles,
&check_for_symbols,
- &slow);
+ &flat_string_check);
__ bind(&check_for_symbols);
// In the strict case the EmitStrictTwoHeapObjectCompare already took care of
@@ -5150,10 +5213,27 @@ void CompareStub::Generate(MacroAssembler* masm) {
if (cc_ == eq && !strict_) {
// Either jumps to slow or returns the answer. Assumes that r2 is the type
// of r0 on entry.
- EmitCheckForSymbols(masm, &slow);
+ EmitCheckForSymbols(masm, &flat_string_check);
}
+ // Check for both being sequential ASCII strings, and inline if that is the
+ // case.
+ __ bind(&flat_string_check);
+
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(r0, r1, r2, r3, &slow);
+
+ __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ r1,
+ r0,
+ r2,
+ r3,
+ r4,
+ r5);
+ // Never falls through to here.
+
__ bind(&slow);
+
__ push(r1);
__ push(r0);
// Figure out which native to call and setup the arguments.
@@ -5220,10 +5300,18 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// The new heap number is in r5. r6 and r7 are scratch.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
- if (CpuFeatures::IsSupported(VFP3)) {
+ // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
+ // using registers d7 and d6 for the double values.
+ bool use_fp_registers = CpuFeatures::IsSupported(VFP3) &&
+ Token::MOD != operation;
+ if (use_fp_registers) {
CpuFeatures::Scope scope(VFP3);
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
- __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
+ __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+ __ vmov(s15, r7);
+ __ vcvt(d7, s15);
+ __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+ __ vmov(s13, r7);
+ __ vcvt(d6, s13);
} else {
// Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
__ mov(r7, Operand(r0));
@@ -5305,9 +5393,16 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
if (mode == OVERWRITE_RIGHT) {
__ mov(r5, Operand(r0)); // Overwrite this heap number.
}
- // Calling convention says that second double is in r2 and r3.
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
+ if (use_fp_registers) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double from tagged HeapNumber r0 to d7.
+ __ sub(r7, r0, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
+ } else {
+ // Calling convention says that second double is in r2 and r3.
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
+ }
__ jmp(&finished_loading_r0);
__ bind(&r0_is_smi);
if (mode == OVERWRITE_RIGHT) {
@@ -5315,10 +5410,12 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
-
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (use_fp_registers) {
CpuFeatures::Scope scope(VFP3);
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ // Convert smi in r0 to double in d7.
+ __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+ __ vmov(s15, r7);
+ __ vcvt(d7, s15);
} else {
// Write Smi from r0 to r3 and r2 in double format.
__ mov(r7, Operand(r0));
@@ -5338,9 +5435,16 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
if (mode == OVERWRITE_LEFT) {
__ mov(r5, Operand(r1)); // Overwrite this heap number.
}
- // Calling convention says that first double is in r0 and r1.
- __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
- __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
+ if (use_fp_registers) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double from tagged HeapNumber r1 to d6.
+ __ sub(r7, r1, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
+ } else {
+ // Calling convention says that first double is in r0 and r1.
+ __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
+ }
__ jmp(&finished_loading_r1);
__ bind(&r1_is_smi);
if (mode == OVERWRITE_LEFT) {
@@ -5348,9 +5452,12 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (use_fp_registers) {
CpuFeatures::Scope scope(VFP3);
- __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
+ // Convert smi in r1 to double in d6.
+ __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+ __ vmov(s13, r7);
+ __ vcvt(d6, s13);
} else {
// Write Smi from r1 to r1 and r0 in double format.
__ mov(r7, Operand(r1));
@@ -5363,22 +5470,12 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
__ bind(&finished_loading_r1);
__ bind(&do_the_call);
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
- // r5: Address of heap number for result.
-
- if (CpuFeatures::IsSupported(VFP3) &&
- ((Token::MUL == operation) ||
- (Token::DIV == operation) ||
- (Token::ADD == operation) ||
- (Token::SUB == operation))) {
+ // If we are inlining the operation using VFP3 instructions for
+ // add, subtract, multiply, or divide, the arguments are in d6 and d7.
+ if (use_fp_registers) {
CpuFeatures::Scope scope(VFP3);
// ARMv7 VFP3 instructions to implement
// double precision, add, subtract, multiply, divide.
- __ vmov(d6, r0, r1);
- __ vmov(d7, r2, r3);
if (Token::MUL == operation) {
__ vmul(d5, d6, d7);
@@ -5391,15 +5488,20 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
} else {
UNREACHABLE();
}
-
- __ vmov(r0, r1, d5);
-
- __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
- __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
- __ mov(r0, Operand(r5));
+ __ sub(r0, r5, Operand(kHeapObjectTag));
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
+ __ add(r0, r0, Operand(kHeapObjectTag));
__ mov(pc, lr);
return;
}
+
+ // If we did not inline the operation, then the arguments are in:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+ // r5: Address of heap number for result.
+
__ push(lr); // For later.
__ push(r5); // Address of heap number that is answer.
__ AlignStack(0);
@@ -6002,59 +6104,96 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- ASSERT(op_ == Token::SUB);
-
- Label undo;
- Label slow;
- Label not_smi;
+ Label slow, done;
- // Enter runtime system if the value is not a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &not_smi);
-
- // Enter runtime system if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- __ cmp(r0, Operand(0));
- __ b(eq, &slow);
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(ne, &try_float);
+
+ // Go slow case if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ __ cmp(r0, Operand(0));
+ __ b(eq, &slow);
+
+ // The value of the expression is a smi that is not zero. Try
+ // optimistic subtraction '0 - value'.
+ __ rsb(r1, r0, Operand(0), SetCC);
+ __ b(vs, &slow);
+
+ __ mov(r0, Operand(r1)); // Set r0 to result.
+ __ b(&done);
+
+ __ bind(&try_float);
+ __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ __ b(ne, &slow);
+ // r0 is a heap number. Get a new heap number in r1.
+ if (overwrite_) {
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ } else {
+ AllocateHeapNumber(masm, &slow, r1, r2, r3);
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
+ __ mov(r0, Operand(r1));
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ __ b(ne, &slow);
+
+    // Convert the heap number in r0 to an untagged integer in r1.
+ GetInt32(masm, r0, r1, r2, r3, &slow);
+
+ // Do the bitwise operation (move negated) and check if the result
+ // fits in a smi.
+ Label try_float;
+ __ mvn(r1, Operand(r1));
+ __ add(r2, r1, Operand(0x40000000), SetCC);
+ __ b(mi, &try_float);
+ __ mov(r0, Operand(r1, LSL, kSmiTagSize));
+ __ b(&done);
+
+ __ bind(&try_float);
+ if (!overwrite_) {
+ // Allocate a fresh heap number, but don't overwrite r0 until
+ // we're sure we can do it without going through the slow case
+ // that needs the value in r0.
+ AllocateHeapNumber(masm, &slow, r2, r3, r4);
+ __ mov(r0, Operand(r2));
+ }
- // The value of the expression is a smi that is not zero. Try
- // optimistic subtraction '0 - value'.
- __ rsb(r1, r0, Operand(0), SetCC);
- __ b(vs, &slow);
+ // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+ // have to set up a frame.
+ WriteInt32ToHeapNumberStub stub(r1, r0, r2);
+ __ push(lr);
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ } else {
+ UNIMPLEMENTED();
+ }
- __ mov(r0, Operand(r1)); // Set r0 to result.
+ __ bind(&done);
__ StubReturn(1);
- // Enter runtime system.
+ // Handle the slow case by jumping to the JavaScript builtin.
__ bind(&slow);
__ push(r0);
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
-
- __ bind(&not_smi);
- __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
- __ b(ne, &slow);
- // r0 is a heap number. Get a new heap number in r1.
- if (overwrite_) {
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- } else {
- AllocateHeapNumber(masm, &slow, r1, r2, r3);
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
- __ mov(r0, Operand(r1));
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
+ break;
+ default:
+ UNREACHABLE();
}
- __ StubReturn(1);
-}
-
-
-int CEntryStub::MinorKey() {
- ASSERT(result_size_ <= 2);
- // Result returned in r0 or r0+r1 by default.
- return 0;
}
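
The two fast paths above reduce to simple bit tricks. A hedged host-side sketch of both, under the same smi assumptions as before (31-bit payload, tag bit zero); note the real stub also goes slow for zero so that it can produce -0 correctly:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Token::SUB smi path: optimistic '0 - value' on the tagged word, bailing
    // out when the subtraction would overflow (the 'b vs, &slow' branch).
    static bool NegateSmi(int32_t tagged, int32_t* result) {
      if (tagged == 0 || tagged == INT32_MIN) return false;  // -0 or overflow: slow.
      *result = -tagged;  // Negating a tagged smi keeps the (zero) tag bit.
      return true;
    }

    // Token::SUB heap-number path: flip only the IEEE-754 sign bit
    // (HeapNumber::kSignMask is the top bit of the exponent word).
    static double NegateDouble(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      bits ^= 0x8000000000000000ULL;  // eor r2, r2, Operand(HeapNumber::kSignMask)
      std::memcpy(&value, &bits, sizeof(bits));
      return value;
    }

    // Token::BIT_NOT range check: 'add r2, r1, 0x40000000, SetCC; b mi' tests
    // whether an untagged 32-bit result still fits in a 31-bit smi payload.
    static bool FitsSmi(int32_t value) {
      return (static_cast<uint32_t>(value) + 0x40000000u) < 0x80000000u;
    }

    int main() {
      int32_t out = 0;
      std::printf("%d %d\n", NegateSmi(7 * 2, &out), out);  // 1 -14 (tagged -7)
      std::printf("%.1f\n", NegateDouble(2.5));             // -2.5
      std::printf("%d %d\n", FitsSmi(0x3FFFFFFF), FitsSmi(0x40000000));  // 1 0
    }
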
@@ -6165,7 +6304,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- ExitFrame::Mode mode,
bool do_gc,
bool always_allocate) {
// r0: result parameter for PerformGC, if any
@@ -6225,7 +6363,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
- __ LeaveExitFrame(mode);
+ __ LeaveExitFrame(mode_);
// check if we should retry or throw exception
Label retry;
@@ -6258,7 +6396,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
}
-void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+void CEntryStub::Generate(MacroAssembler* masm) {
// Called from JavaScript; parameters are on stack as if calling JS function
// r0: number of arguments including receiver
// r1: pointer to builtin function
@@ -6266,17 +6404,15 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
+ // Result returned in r0 or r0+r1 by default.
+
// NOTE: Invocations of builtins may return failure objects
// instead of a proper result. The builtin entry handles
// this by performing a garbage collection and retrying the
// builtin once.
- ExitFrame::Mode mode = is_debug_break
- ? ExitFrame::MODE_DEBUG
- : ExitFrame::MODE_NORMAL;
-
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode);
+ __ EnterExitFrame(mode_);
// r4: number of arguments (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
@@ -6291,7 +6427,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
false,
false);
@@ -6300,7 +6435,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
true,
false);
@@ -6311,7 +6445,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
true,
true);
@@ -6601,6 +6734,33 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow;
+
+  // If the receiver might be a value (string, number or boolean), check for this
+ // and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // function, receiver [, arguments]
+ Label receiver_is_value, receiver_is_js_object;
+ __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ BranchOnSmi(r1, &receiver_is_value);
+
+ // Check if the receiver is a valid JS object.
+ __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
+ __ b(ge, &receiver_is_js_object);
+
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(r1);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+ __ LeaveInternalFrame();
+ __ str(r0, MemOperand(sp, argc_ * kPointerSize));
+
+ __ bind(&receiver_is_js_object);
+ }
+
// Get the function to call from the stack.
// function, receiver [, arguments]
__ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
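
The preamble above implements the JavaScript rule that a primitive receiver is boxed (via TO_OBJECT) before the call. A condensed model of the check order; the type constants below are placeholders, not V8's real instance-type values:

    #include <cstdio>

    // Placeholder ordering: everything below FIRST_JS_OBJECT_TYPE is a value
    // (string, number, boolean) that needs boxing.
    enum InstanceType { STRING_TYPE = 1, HEAP_NUMBER_TYPE = 2, ODDBALL_TYPE = 3,
                        FIRST_JS_OBJECT_TYPE = 100 };

    struct TaggedValue { bool is_smi; InstanceType type; };

    static bool ReceiverNeedsBoxing(const TaggedValue& receiver) {
      if (receiver.is_smi) return true;             // BranchOnSmi: a number value.
      return receiver.type < FIRST_JS_OBJECT_TYPE;  // 'b ge' skips boxing otherwise.
    }

    int main() {
      TaggedValue smi = { true, HEAP_NUMBER_TYPE };
      TaggedValue obj = { false, FIRST_JS_OBJECT_TYPE };
      std::printf("%d %d\n", ReceiverNeedsBoxing(smi), ReceiverNeedsBoxing(obj));  // 1 0
    }
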
@@ -6677,6 +6837,101 @@ int CompareStub::MinorKey() {
}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ Label compare_lengths;
+ // Find minimum length and length difference.
+ __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
+ Register length_delta = scratch3;
+ __ mov(scratch1, scratch2, LeaveCC, gt);
+ Register min_length = scratch1;
+ __ tst(min_length, Operand(min_length));
+ __ b(eq, &compare_lengths);
+
+  // Set up registers so that we only need to increment one register
+ // in the loop.
+ __ add(scratch2, min_length,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(left, left, Operand(scratch2));
+ __ add(right, right, Operand(scratch2));
+  // Registers left and right now point just past the first min_length
+  // characters of their strings.
+ __ rsb(min_length, min_length, Operand(-1));
+ Register index = min_length;
+ // Index starts at -min_length.
+
+ {
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ // Compare characters.
+ __ add(index, index, Operand(1), SetCC);
+ __ ldrb(scratch2, MemOperand(left, index), ne);
+ __ ldrb(scratch4, MemOperand(right, index), ne);
+ // Skip to compare lengths with eq condition true.
+ __ b(eq, &compare_lengths);
+ __ cmp(scratch2, scratch4);
+ __ b(eq, &loop);
+ // Fallthrough with eq condition false.
+ }
+ // Compare lengths - strings up to min-length are equal.
+ __ bind(&compare_lengths);
+ ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+ // Use zero length_delta as result.
+ __ mov(r0, Operand(length_delta), SetCC, eq);
+ // Fall through to here if characters compare not-equal.
+ __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
+ __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
+ __ Ret();
+}
+
+
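
In plain C++ the routine above is a common-prefix walk followed by a length comparison; when the prefixes match, the length delta doubles as the result. A sketch assuming LESS/EQUAL/GREATER are -1/0/1:

    #include <cstdio>

    static int CompareFlatAscii(const char* left, int left_len,
                                const char* right, int right_len) {
      int length_delta = left_len - right_len;            // scratch3 above.
      int min_length = left_len < right_len ? left_len : right_len;
      for (int i = 0; i < min_length; i++) {              // The compare loop.
        if (left[i] != right[i]) {
          return left[i] < right[i] ? -1 : 1;             // Characters decide.
        }
      }
      // Equal up to min_length; the length difference decides.
      return (length_delta < 0) ? -1 : (length_delta > 0) ? 1 : 0;
    }

    int main() {
      std::printf("%d %d %d\n",
                  CompareFlatAscii("abc", 3, "abd", 3),   // -1
                  CompareFlatAscii("abc", 3, "ab", 2),    //  1
                  CompareFlatAscii("ab", 2, "ab", 2));    //  0
    }
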
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // sp[0]: return address
+ // sp[4]: right string
+ // sp[8]: left string
+
+ __ ldr(r0, MemOperand(sp, 2 * kPointerSize)); // left
+ __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); // right
+
+ Label not_same;
+ __ cmp(r0, r1);
+ __ b(ne, &not_same);
+ ASSERT_EQ(0, EQUAL);
+ ASSERT_EQ(0, kSmiTag);
+ __ mov(r0, Operand(Smi::FromInt(EQUAL)));
+ __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&not_same);
+
+ // Check that both objects are sequential ascii strings.
+ __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
+
+ // Compare flat ascii strings natively. Remove arguments from stack first.
+ __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index b62bc36d7..0384485f1 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -32,6 +32,7 @@ namespace v8 {
namespace internal {
// Forward declarations
+class CompilationInfo;
class DeferredCode;
class RegisterAllocator;
class RegisterFile;
@@ -43,57 +44,69 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Reference support
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-
+// A reference is a C++ stack-allocated object that puts a
+// reference on the virtual frame. The reference may be consumed
+// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
+// When the lifetime (scope) of a valid reference ends, it must have
+// been consumed, and be in state UNLOADED.
class Reference BASE_EMBEDDED {
public:
// The values of the types is important, see size().
- enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen, Expression* expression);
+ enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+ Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get = false);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
- ASSERT(type_ == ILLEGAL);
+ ASSERT_EQ(ILLEGAL, type_);
type_ = value;
}
+ void set_unloaded() {
+ ASSERT_NE(ILLEGAL, type_);
+ ASSERT_NE(UNLOADED, type_);
+ type_ = UNLOADED;
+ }
// The size the reference takes up on the stack.
- int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+ int size() const {
+ return (type_ < SLOT) ? 0 : type_;
+ }
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+ bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
- // the expression stack, and it is left in place with its value above it.
+ // the expression stack, and it is consumed by the call unless the
+ // reference is for a compound assignment.
+ // If the reference is not consumed, it is left in place under its value.
void GetValue();
- // Generate code to push the value of a reference on top of the expression
- // stack and then spill the stack frame. This function is used temporarily
- // while the code generator is being transformed.
+ // Generate code to pop a reference, push the value of the reference,
+ // and then spill the stack frame.
inline void GetValueAndSpill();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
- // on the expression stack. The stored value is left in place (with the
- // reference intact below it) to support chained assignments.
+ // on the expression stack. The value is stored in the location specified
+ // by the reference, and is left on top of the stack, after the reference
+ // is popped from beneath it (unloaded).
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
+ // Keep the reference on the stack after get, so it can be used by set later.
+ bool persist_after_get_;
};
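
The comment block above describes a small state machine. A toy model of it, mirroring the enum values and the size() rule from this header (consuming operations move a valid reference to UNLOADED):

    #include <cassert>
    #include <cstdio>

    class ToyReference {
     public:
      enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
      explicit ToyReference(Type type) : type_(type) {}
      // UNLOADED and ILLEGAL occupy no stack slots; NAMED keeps one value on
      // the frame (the receiver) and KEYED keeps two (receiver and key).
      int size() const { return (type_ < SLOT) ? 0 : type_; }
      void Unload() {  // Models GetValue/SetValue consuming the reference.
        assert(type_ != ILLEGAL && type_ != UNLOADED);
        type_ = UNLOADED;
      }
      bool is_unloaded() const { return type_ == UNLOADED; }
     private:
      Type type_;
    };

    int main() {
      ToyReference keyed(ToyReference::KEYED);
      std::printf("%d\n", keyed.size());         // 2
      keyed.Unload();
      std::printf("%d\n", keyed.is_unloaded());  // 1
    }
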
@@ -137,11 +150,21 @@ class CodeGenState BASE_EMBEDDED {
class CodeGenerator: public AstVisitor {
public:
+ // Compilation mode. Either the compiler is used as the primary
+  // compiler and needs to set up everything, or the compiler is used as
+ // the secondary compiler for split compilation and has to handle
+ // bailouts.
+ enum Mode {
+ PRIMARY,
+ SECONDARY
+ };
+
// Takes a function literal, generates code for it. This function should only
// be called by compiler.cc.
static Handle<Code> MakeCode(FunctionLiteral* fun,
Handle<Script> script,
- bool is_eval);
+ bool is_eval,
+ CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(FunctionLiteral* fun);
@@ -189,8 +212,7 @@ class CodeGenerator: public AstVisitor {
private:
// Construction/Destruction
- CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
- virtual ~CodeGenerator() { delete masm_; }
+ CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
// Accessors
Scope* scope() const { return scope_; }
@@ -227,7 +249,7 @@ class CodeGenerator: public AstVisitor {
inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
- void GenCode(FunctionLiteral* fun);
+ void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
// The following are used by class Reference.
void LoadReference(Reference* ref);
@@ -274,6 +296,9 @@ class CodeGenerator: public AstVisitor {
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
// Store the value on top of the stack to a slot.
void StoreToSlot(Slot* slot, InitState init_state);
+ // Load a keyed property, leaving it in r0. The receiver and key are
+ // passed on the stack, and remain there.
+ void EmitKeyedLoad(bool is_global);
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
@@ -304,7 +329,9 @@ class CodeGenerator: public AstVisitor {
bool reversed,
OverwriteMode mode);
- void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+ void CallWithArguments(ZoneList<Expression*>* arguments,
+ CallFunctionFlags flags,
+ int position);
// Control flow
void Branch(bool if_true, JumpTarget* target);
@@ -339,6 +366,7 @@ class CodeGenerator: public AstVisitor {
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
+ void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -426,33 +454,13 @@ class CodeGenerator: public AstVisitor {
friend class JumpTarget;
friend class Reference;
friend class FastCodeGenerator;
- friend class CodeGenSelector;
+ friend class FullCodeGenerator;
+ friend class FullCodeGenSyntaxChecker;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop)
- : argc_(argc), in_loop_(in_loop) {}
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
-
-#if defined(DEBUG)
- void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
-#endif // defined(DEBUG)
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() { return argc_; }
- InLoopFlag InLoop() { return in_loop_; }
-};
-
-
class GenericBinaryOpStub : public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
@@ -530,6 +538,28 @@ class GenericBinaryOpStub : public CodeStub {
};
+class StringCompareStub: public CodeStub {
+ public:
+ StringCompareStub() { }
+
+  // Compares two flat ASCII strings and returns the result in r0.
+ // Does not use the stack.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ private:
+ Major MajorKey() { return StringCompare; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 943220739..8a32c95b6 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -237,6 +237,7 @@ class Instr {
inline int RnField() const { return Bits(19, 16); }
inline int RdField() const { return Bits(15, 12); }
+ inline int CoprocessorField() const { return Bits(11, 8); }
// Support for VFP.
// Vn(19-16) | Vd(15-12) | Vm(3-0)
inline int VnField() const { return Bits(19, 16); }
@@ -246,6 +247,8 @@ class Instr {
inline int MField() const { return Bit(5); }
inline int DField() const { return Bit(22); }
inline int RtField() const { return Bits(15, 12); }
+ inline int PField() const { return Bit(24); }
+ inline int UField() const { return Bit(23); }
// Fields used in Data processing instructions
inline Opcode OpcodeField() const {
@@ -296,6 +299,7 @@ class Instr {
inline bool HasB() const { return BField() == 1; }
inline bool HasW() const { return WField() == 1; }
inline bool HasL() const { return LField() == 1; }
+ inline bool HasU() const { return UField() == 1; }
inline bool HasSign() const { return SignField() == 1; }
inline bool HasH() const { return HField() == 1; }
inline bool HasLink() const { return LinkField() == 1; }
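
The new accessors above all reduce to masking a contiguous bit range out of the 32-bit instruction word. An equivalent standalone helper, applied to a hand-assembled vldr encoding (the example word is illustrative):

    #include <cstdint>
    #include <cstdio>

    // Extract bits hi..lo (inclusive) of an ARM instruction word.
    static int Bits(uint32_t instr, int hi, int lo) {
      return (instr >> lo) & ((1u << (hi - lo + 1)) - 1);
    }

    int main() {
      uint32_t instr = 0xED932B02;  // vldr d2, [r3, #8] (hand-assembled).
      std::printf("coprocessor: 0x%X\n", Bits(instr, 11, 8));  // 0xB: VFP double.
      std::printf("U bit: %d\n", Bits(instr, 23, 23));         // 1: add offset.
      std::printf("P bit: %d\n", Bits(instr, 24, 24));         // 1: offset addressing.
    }
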
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index fc9808d52..6eb5239b8 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -98,7 +98,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ mov(r0, Operand(0)); // no arguments
__ mov(r1, Operand(ExternalReference::debug_break()));
- CEntryDebugBreakStub ceb;
+ CEntryStub ceb(1, ExitFrame::MODE_DEBUG);
__ CallStub(&ceb);
// Restore the register values containing object pointers from the expression
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index afed0fa5c..5b314557d 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -998,29 +998,43 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
// Decode Type 6 coprocessor instructions.
// Dm = vmov(Rt, Rt2)
// <Rt, Rt2> = vmov(Dm)
+// Ddst = MEM(Rbase + 4*offset).
+// MEM(Rbase + 4*offset) = Dsrc.
void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
- if (instr->Bit(23) == 1) {
- Unknown(instr); // Not used by V8.
- } else if (instr->Bit(22) == 1) {
- if ((instr->Bits(27, 24) == 0xC) &&
- (instr->Bit(22) == 1) &&
- (instr->Bits(11, 8) == 0xB) &&
- (instr->Bits(7, 6) == 0x0) &&
- (instr->Bit(4) == 1)) {
- if (instr->Bit(20) == 0) {
- Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
- } else if (instr->Bit(20) == 1) {
- Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else if (instr->Bit(21) == 1) {
+ if (instr->CoprocessorField() != 0xB) {
Unknown(instr); // Not used by V8.
} else {
- Unknown(instr); // Not used by V8.
+ switch (instr->OpcodeField()) {
+ case 0x2:
+ // Load and store double to two GP registers
+ if (instr->Bits(7, 4) != 0x1) {
+ Unknown(instr); // Not used by V8.
+ } else if (instr->HasL()) {
+ Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
+ } else {
+ Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
+ }
+ break;
+ case 0x8:
+ if (instr->HasL()) {
+ Format(instr, "vldr'cond 'Dd, ['rn - 4*'off8]");
+ } else {
+ Format(instr, "vstr'cond 'Dd, ['rn - 4*'off8]");
+ }
+ break;
+ case 0xC:
+ if (instr->HasL()) {
+ Format(instr, "vldr'cond 'Dd, ['rn + 4*'off8]");
+ } else {
+ Format(instr, "vstr'cond 'Dd, ['rn + 4*'off8]");
+ }
+ break;
+ default:
+ Unknown(instr); // Not used by V8.
+ break;
+ }
}
}
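
Condensed into host-side C++, the new decode path checks the coprocessor field first and then dispatches on the opcode bits. The mnemonics and the dropped secondary bit checks below are simplifications of the real decoder:

    #include <cstdint>
    #include <cstdio>

    static int Bits(uint32_t instr, int hi, int lo) {
      return (instr >> lo) & ((1u << (hi - lo + 1)) - 1);
    }

    static const char* DecodeType6(uint32_t instr) {
      if (Bits(instr, 11, 8) != 0xB) return "unknown";  // CoprocessorField != 11.
      bool load = Bits(instr, 20, 20) == 1;             // The L bit.
      switch (Bits(instr, 24, 21)) {                    // OpcodeField.
        case 0x2: return load ? "vmov rt, rn, Dm" : "vmov Dm, rt, rn";
        case 0x8: return load ? "vldr Dd, [rn - 4*off8]" : "vstr Dd, [rn - 4*off8]";
        case 0xC: return load ? "vldr Dd, [rn + 4*off8]" : "vstr Dd, [rn + 4*off8]";
        default:  return "unknown";                     // Not used by V8.
      }
    }

    int main() {
      std::printf("%s\n", DecodeType6(0xED932B02));  // vldr Dd, [rn + 4*off8]
    }
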
diff --git a/deps/v8/src/arm/fast-codegen-arm.cc b/deps/v8/src/arm/fast-codegen-arm.cc
index 0d934b5ab..1aeea7ab6 100644
--- a/deps/v8/src/arm/fast-codegen-arm.cc
+++ b/deps/v8/src/arm/fast-codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,1711 +28,113 @@
#include "v8.h"
#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
#include "fast-codegen.h"
-#include "parser.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm())
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right. The actual
-// argument count matches the formal parameter count expected by the
-// function.
-//
-// The live registers are:
-// o r1: the JS function object being called (ie, ourselves)
-// o cp: our context
-// o fp: our caller's frame pointer
-// o sp: stack pointer
-// o lr: return address
-//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-arm.h for its layout.
-void FastCodeGenerator::Generate(FunctionLiteral* fun) {
- function_ = fun;
- SetFunctionPosition(fun);
- int locals_count = fun->scope()->num_stack_slots();
-
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- if (locals_count > 0) {
- // Load undefined value here, so the value is ready for the loop below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- }
- // Adjust fp to point to caller's fp.
- __ add(fp, sp, Operand(2 * kPointerSize));
-
- { Comment cmnt(masm_, "[ Allocate locals");
- for (int i = 0; i < locals_count; i++) {
- __ push(ip);
- }
- }
-
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- if (fun->scope()->num_heap_slots() > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is in r1.
- __ push(r1);
- __ CallRuntime(Runtime::kNewContext, 1);
- function_in_register = false;
- // Context is returned in both r0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = fun->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = fun->scope()->parameter(i)->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ ldr(r0, MemOperand(fp, parameter_offset));
- // Store it in the context
- __ str(r0, MemOperand(cp, Context::SlotOffset(slot->index())));
- }
- }
- }
-
- Variable* arguments = fun->scope()->arguments()->AsVariable();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register) {
- // Load this again, if it's used by the local context below.
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mov(r3, r1);
- }
- // Receiver is just before the parameters on the caller's stack.
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
- fun->num_parameters() * kPointerSize));
- __ mov(r1, Operand(Smi::FromInt(fun->num_parameters())));
- __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
-
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiever and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ CallStub(&stub);
- // Duplicate the value; move-to-slot operation might clobber registers.
- __ mov(r3, r0);
- Move(arguments->slot(), r0, r1, r2);
- Slot* dot_arguments_slot =
- fun->scope()->arguments_shadow()->AsVariable()->slot();
- Move(dot_arguments_slot, r3, r1, r2);
- }
-
- // Check the stack for overflow or break request.
- // Put the lr setup instruction in the delay slot. The kInstrSize is
- // added to the implicit 8 byte offset that always applies to operations
- // with pc and gives a return address 12 bytes down.
- { Comment cmnt(masm_, "[ Stack check");
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- __ add(lr, pc, Operand(Assembler::kInstrSize));
- __ cmp(sp, Operand(r2));
- StackCheckStub stub;
- __ mov(pc,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
- }
-
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(fun->scope()->declarations());
- }
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(fun->body());
- ASSERT(loop_depth() == 0);
- }
-
- { Comment cmnt(masm_, "[ return <undefined>;");
- // Emit a 'return undefined' in case control fell off the end of the
- // body.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- }
- EmitReturnSequence(function_->end_position());
+void FastCodeGenerator::EmitLoadReceiver(Register reg) {
+ // Offset 2 is due to return address and saved frame pointer.
+ int index = 2 + function()->scope()->num_parameters();
+ __ ldr(reg, MemOperand(sp, index * kPointerSize));
}
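
The index arithmetic in EmitLoadReceiver is easy to restate: per the comment above, two slots (return address and saved frame pointer) plus the parameters separate sp from the receiver, hence the 2 + num_parameters index. A minimal standalone sketch in plain C++ (kPointerSize and the arity are illustrative stand-ins, not V8 declarations):

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;  // 32-bit ARM pointers
      int num_parameters = 3;      // hypothetical function arity
      // Two slots (return address, saved frame pointer) plus the
      // parameters lie between sp and the receiver's slot.
      int index = 2 + num_parameters;
      std::printf("receiver at [sp + %d]\n", index * kPointerSize);
      return 0;
    }
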
-void FastCodeGenerator::EmitReturnSequence(int position) {
- Comment cmnt(masm_, "[ Return sequence");
- if (return_label_.is_bound()) {
- __ b(&return_label_);
- } else {
- __ bind(&return_label_);
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0.
- __ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
-
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-
- // Calculate the exact length of the return sequence and make sure that
- // the constant pool is not emitted inside of the return sequence.
- int num_parameters = function_->scope()->num_parameters();
- int32_t sp_delta = (num_parameters + 1) * kPointerSize;
- int return_sequence_length = Assembler::kJSReturnSequenceLength;
- if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
- // Additional mov instruction generated.
- return_sequence_length++;
- }
- masm_->BlockConstPoolFor(return_sequence_length);
-
- CodeGenerator::RecordPositions(masm_, position);
- __ RecordJSReturn();
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- __ add(sp, sp, Operand(sp_delta));
- __ Jump(lr);
-
- // Check that the size of the code used for returning matches what is
- // expected by the debugger. The add instruction above is an addressing
- // mode 1 instruction where there are restrictions on which immediate values
-    // can be encoded in the instruction and which immediate values require
- // use of an additional instruction for moving the immediate to a temporary
- // register.
- ASSERT_EQ(return_sequence_length,
- masm_->InstructionsGeneratedSince(&check_exit_codesize));
+void FastCodeGenerator::EmitReceiverMapCheck() {
+ Comment cmnt(masm(), ";; MapCheck(this)");
+ if (FLAG_print_ir) {
+ PrintF("MapCheck(this)\n");
}
-}
+ EmitLoadReceiver(r1);
+ __ BranchOnSmi(r1, bailout());
-void FastCodeGenerator::Apply(Expression::Context context,
- Slot* slot,
- Register scratch) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- break;
- case Expression::kValue:
- case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
- Move(scratch, slot);
- Apply(context, scratch);
- break;
- }
+ ASSERT(has_receiver() && receiver()->IsHeapObject());
+ Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
+ Handle<Map> map(object->map());
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ mov(ip, Operand(map));
+ __ cmp(r3, ip);
+ __ b(ne, bailout());
}
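
Conceptually the map check pins this specialized code to a single hidden class: smis are rejected first (they carry no map), then the receiver's map pointer is compared against the compile-time map, with any mismatch branching to the bailout. A rough C++ analogue (Map and Object are illustrative stand-ins, not V8 types):

    struct Map {};                      // stands in for a hidden class
    struct Object { const Map* map; };  // simplified tagged heap object

    // Returns true when the fast path may proceed; false means bail out.
    bool ReceiverMapHolds(const Object* receiver, const Map* expected) {
      if (receiver == nullptr) return false;  // plays the role of BranchOnSmi
      return receiver->map == expected;       // __ b(ne, bailout()) otherwise
    }
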
-void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- break;
- case Expression::kValue:
- case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
- __ mov(ip, Operand(lit->handle()));
- Apply(context, ip);
- break;
- }
-}
-
-
-void FastCodeGenerator::ApplyTOS(Expression::Context context) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- __ Drop(1);
- break;
- case Expression::kValue:
- break;
- case Expression::kTest:
- __ pop(r0);
- TestAndBranch(r0, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ ldr(r0, MemOperand(sp, 0));
- TestAndBranch(r0, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ ldr(r0, MemOperand(sp, 0));
- TestAndBranch(r0, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- }
- }
-}
-
-
-void FastCodeGenerator::DropAndApply(int count,
- Expression::Context context,
- Register reg) {
- ASSERT(count > 0);
- ASSERT(!reg.is(sp));
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- __ Drop(count);
- break;
- case Expression::kValue:
- if (count > 1) __ Drop(count - 1);
- __ str(reg, MemOperand(sp));
- break;
- case Expression::kTest:
- __ Drop(count);
- TestAndBranch(reg, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (count > 1) __ Drop(count - 1);
- __ str(reg, MemOperand(sp));
- TestAndBranch(reg, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (count > 1) __ Drop(count - 1);
- __ str(reg, MemOperand(sp));
- TestAndBranch(reg, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
- }
-}
-
-
-MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- return MemOperand(fp, SlotOffset(slot));
- case Slot::CONTEXT: {
- int context_chain_length =
- function_->scope()->ContextChainLength(slot->var()->scope());
- __ LoadContext(scratch, context_chain_length);
- return CodeGenerator::ContextOperand(scratch, slot->index());
- }
- case Slot::LOOKUP:
- UNREACHABLE();
- }
- UNREACHABLE();
- return MemOperand(r0, 0);
-}
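
EmitSlotSearch's dispatch is worth restating: parameters and locals resolve to fixed fp-relative addresses, context slots require walking the context chain into a scratch register first, and lookup slots never reach this code generator. A schematic in plain C++ (the types and offsets are illustrative):

    #include <cassert>

    enum class SlotType { kParameter, kLocal, kContext, kLookup };
    struct Address { const char* base; int offset; };  // register + displacement

    Address SlotAddress(SlotType type, int fp_offset, int slot_index) {
      const int kPointerSize = 4;
      switch (type) {
        case SlotType::kParameter:
        case SlotType::kLocal:
          return {"fp", fp_offset};  // MemOperand(fp, SlotOffset(slot))
        case SlotType::kContext:
          // The generated code first follows the context chain
          // context_chain_length times, leaving the target context in a
          // scratch register; the slot is then a fixed offset from it.
          return {"scratch", slot_index * kPointerSize};
        case SlotType::kLookup:
          assert(false && "lookup slots are not compiled here");
      }
      return {"r0", 0};  // unreachable; mirrors the defensive return above
    }
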
-
-
-void FastCodeGenerator::Move(Register destination, Slot* source) {
- // Use destination as scratch.
- MemOperand location = EmitSlotSearch(source, destination);
- __ ldr(destination, location);
-}
-
-
-
-void FastCodeGenerator::Move(Slot* dst,
- Register src,
- Register scratch1,
- Register scratch2) {
- ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
- ASSERT(!scratch1.is(src) && !scratch2.is(src));
- MemOperand location = EmitSlotSearch(dst, scratch1);
- __ str(src, location);
- // Emit the write barrier code if the location is in the heap.
- if (dst->type() == Slot::CONTEXT) {
- __ mov(scratch2, Operand(Context::SlotOffset(dst->index())));
- __ RecordWrite(scratch1, scratch2, src);
- }
-}
-
-
-
-void FastCodeGenerator::TestAndBranch(Register source,
- Label* true_label,
- Label* false_label) {
- ASSERT_NE(NULL, true_label);
- ASSERT_NE(NULL, false_label);
- // Call the runtime to find the boolean value of the source and then
- // translate it into control flow to the pair of labels.
- __ push(source);
- __ CallRuntime(Runtime::kToBool, 1);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, true_label);
- __ jmp(false_label);
-}
-
-
-void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
- Comment cmnt(masm_, "[ Declaration");
- Variable* var = decl->proxy()->var();
- ASSERT(var != NULL); // Must have been resolved.
- Slot* slot = var->slot();
- Property* prop = var->AsProperty();
-
- if (slot != NULL) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- if (decl->mode() == Variable::CONST) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, MemOperand(fp, SlotOffset(slot)));
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(ip);
- __ str(ip, MemOperand(fp, SlotOffset(slot)));
- }
- break;
-
- case Slot::CONTEXT:
- // We bypass the general EmitSlotSearch because we know more about
- // this specific context.
-
- // The variable in the decl always resides in the current context.
- ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
- if (FLAG_debug_code) {
- // Check if we have the correct context pointer.
- __ ldr(r1,
- CodeGenerator::ContextOperand(cp, Context::FCONTEXT_INDEX));
- __ cmp(r1, cp);
- __ Check(eq, "Unexpected declaration in current context.");
- }
- if (decl->mode() == Variable::CONST) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, CodeGenerator::ContextOperand(cp, slot->index()));
- // No write barrier since the_hole_value is in old space.
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(r0);
- __ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
- int offset = Context::SlotOffset(slot->index());
- __ mov(r2, Operand(offset));
- // We know that we have written a function, which is not a smi.
- __ RecordWrite(cp, r2, r0);
- }
- break;
-
- case Slot::LOOKUP: {
- __ mov(r2, Operand(var->name()));
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(decl->mode() == Variable::VAR ||
- decl->mode() == Variable::CONST);
- PropertyAttributes attr =
- (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
- __ mov(r1, Operand(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (decl->mode() == Variable::CONST) {
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
- } else if (decl->fun() != NULL) {
- __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
- Visit(decl->fun()); // Initial value for function decl.
- } else {
- __ mov(r0, Operand(Smi::FromInt(0))); // No initial value!
- __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-
- } else if (prop != NULL) {
- if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
- // We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value.
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->key()->context());
- Visit(prop->key());
-
- if (decl->fun() != NULL) {
- ASSERT_EQ(Expression::kValue, decl->fun()->context());
- Visit(decl->fun());
- __ pop(r0);
- } else {
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- }
-
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- // Value in r0 is ignored (declarations are statements). Receiver
- // and key on stack are discarded.
- __ Drop(2);
- }
- }
-}
-
-
-void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals.
- // The context is the first argument.
- __ mov(r1, Operand(pairs));
- __ mov(r0, Operand(Smi::FromInt(is_eval_ ? 1 : 0)));
- __ stm(db_w, sp, cp.bit() | r1.bit() | r0.bit());
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
- // Return value is ignored.
-}
-
-
-void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- Comment cmnt(masm_, "[ FunctionLiteral");
-
- // Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate =
- Compiler::BuildBoilerplate(expr, script_, this);
- if (HasStackOverflow()) return;
-
- ASSERT(boilerplate->IsBoilerplate());
-
- // Create a new closure.
- __ mov(r0, Operand(boilerplate));
- __ stm(db_w, sp, cp.bit() | r0.bit());
- __ CallRuntime(Runtime::kNewClosure, 2);
- Apply(expr->context(), r0);
-}
-
-
-void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var(), expr->context());
-}
-
-
-void FastCodeGenerator::EmitVariableLoad(Variable* var,
- Expression::Context context) {
- Expression* rewrite = var->rewrite();
- if (rewrite == NULL) {
- ASSERT(var->is_global());
- Comment cmnt(masm_, "Global variable");
- // Use inline caching. Variable name is passed in r2 and the global
- // object on the stack.
- __ ldr(ip, CodeGenerator::GlobalObject());
- __ push(ip);
- __ mov(r2, Operand(var->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- DropAndApply(1, context, r0);
- } else if (rewrite->AsSlot() != NULL) {
- Slot* slot = rewrite->AsSlot();
- if (FLAG_debug_code) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL: {
- Comment cmnt(masm_, "Stack slot");
- break;
- }
- case Slot::CONTEXT: {
- Comment cmnt(masm_, "Context slot");
- break;
- }
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- break;
- }
- }
- Apply(context, slot, r0);
- } else {
- Comment cmnt(masm_, "Variable rewritten to property");
- // A variable has been rewritten into an explicit access to an object
- // property.
- Property* property = rewrite->AsProperty();
- ASSERT_NOT_NULL(property);
-
- // The only property expressions that can occur are of the form
- // "slot[literal]".
-
- // Assert that the object is in a slot.
- Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->slot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- Move(r2, object_slot);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ mov(r1, Operand(key_literal->handle()));
-
- // Push both as arguments to ic.
- __ stm(db_w, sp, r2.bit() | r1.bit());
-
- // Do a keyed property load.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- // Drop key and object left on the stack by IC, and push the result.
- DropAndApply(2, context, r0);
- }
-}
-
-
-void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- Label done;
- // Registers will be used as follows:
- // r4 = JS function, literals array
- // r3 = literal index
- // r2 = RegExp pattern
- // r1 = RegExp flags
- // r0 = temp + return value (RegExp literal)
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ ldr(r0, FieldMemOperand(r4, literal_offset));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
- __ b(ne, &done);
- __ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r2, Operand(expr->pattern()));
- __ mov(r1, Operand(expr->flags()));
- __ stm(db_w, sp, r4.bit() | r3.bit() | r2.bit() | r1.bit());
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ bind(&done);
- Apply(expr->context(), r0);
-}
-
-
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- Comment cmnt(masm_, "[ ObjectLiteral");
- __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
- __ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r0, Operand(expr->constant_properties()));
- __ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
- } else {
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
- }
-
- // If result_saved == true: The result is saved on top of the
- // stack and in r0.
-  // If result_saved == false: The result is not on the stack, just in r0.
- bool result_saved = false;
-
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key();
- Expression* value = property->value();
- if (!result_saved) {
- __ push(r0); // Save result on stack
- result_saved = true;
- }
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- UNREACHABLE();
-
- case ObjectLiteral::Property::MATERIALIZED_LITERAL: // Fall through.
- ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
- __ pop(r0);
- __ mov(r2, Operand(key->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // StoreIC leaves the receiver on the stack.
- __ ldr(r0, MemOperand(sp)); // Restore result into r0.
- break;
- }
- // Fall through.
-
- case ObjectLiteral::Property::PROTOTYPE:
- __ push(r0);
- Visit(key);
- ASSERT_EQ(Expression::kValue, key->context());
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
- __ CallRuntime(Runtime::kSetProperty, 3);
- __ ldr(r0, MemOperand(sp)); // Restore result into r0.
- break;
-
- case ObjectLiteral::Property::GETTER:
- case ObjectLiteral::Property::SETTER:
- __ push(r0);
- Visit(key);
- ASSERT_EQ(Expression::kValue, key->context());
- __ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
- Smi::FromInt(1) :
- Smi::FromInt(0)));
- __ push(r1);
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
- __ CallRuntime(Runtime::kDefineAccessor, 4);
- __ ldr(r0, MemOperand(sp)); // Restore result into r0
- break;
- }
- }
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- if (result_saved) __ Drop(1);
- break;
- case Expression::kValue:
- if (!result_saved) __ push(r0);
- break;
- case Expression::kTest:
- if (result_saved) __ pop(r0);
- TestAndBranch(r0, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (!result_saved) __ push(r0);
- TestAndBranch(r0, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (!result_saved) __ push(r0);
- TestAndBranch(r0, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
- }
-}
-
-
-void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(expr->constant_elements()));
- __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- }
-
- bool result_saved = false; // Is the result saved to the stack?
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- ZoneList<Expression*>* subexprs = expr->values();
- for (int i = 0, len = subexprs->length(); i < len; i++) {
- Expression* subexpr = subexprs->at(i);
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
-
- if (!result_saved) {
- __ push(r0);
- result_saved = true;
- }
- Visit(subexpr);
- ASSERT_EQ(Expression::kValue, subexpr->context());
-
- // Store the subexpression value in the array's elements.
- __ pop(r0); // Subexpression value.
- __ ldr(r1, MemOperand(sp)); // Copy of array literal.
- __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ str(r0, FieldMemOperand(r1, offset));
-
- // Update the write barrier for the array store with r0 as the scratch
- // register.
- __ mov(r2, Operand(offset));
- __ RecordWrite(r1, r2, r0);
- }
-
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- if (result_saved) __ Drop(1);
- break;
- case Expression::kValue:
- if (!result_saved) __ push(r0);
- break;
- case Expression::kTest:
- if (result_saved) __ pop(r0);
- TestAndBranch(r0, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (!result_saved) __ push(r0);
- TestAndBranch(r0, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (!result_saved) __ push(r0);
- TestAndBranch(r0, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
- }
-}
-
-
-void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
- Expression::Context context) {
- SetSourcePosition(prop->position());
- Literal* key = prop->key()->AsLiteral();
- __ mov(r2, Operand(key->handle()));
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
+ // Compile global variable accesses as load IC calls. The only live
+ // registers are cp (context) and possibly r1 (this). Both are also saved
+  // on the stack, and cp is preserved by the call.
+ __ ldr(ip, CodeGenerator::GlobalObject());
+ __ push(ip);
+ __ mov(r2, Operand(name));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- Apply(context, r0);
-}
-
-
-void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop,
- Expression::Context context) {
- SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- Apply(context, r0);
-}
-
-
-void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
- Expression::Context context) {
- __ pop(r0);
- __ pop(r1);
- GenericBinaryOpStub stub(op,
- NO_OVERWRITE);
- __ CallStub(&stub);
- Apply(context, r0);
-}
-
-
-void FastCodeGenerator::EmitVariableAssignment(Variable* var,
- Expression::Context context) {
- ASSERT(var != NULL);
- ASSERT(var->is_global() || var->slot() != NULL);
- if (var->is_global()) {
- // Assignment to a global variable. Use inline caching for the
- // assignment. Right-hand-side value is passed in r0, variable name in
- // r2, and the global object on the stack.
- __ pop(r0);
- __ mov(r2, Operand(var->name()));
- __ ldr(ip, CodeGenerator::GlobalObject());
- __ push(ip);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // Overwrite the global object on the stack with the result if needed.
- DropAndApply(1, context, r0);
-
- } else if (var->slot() != NULL) {
- Slot* slot = var->slot();
- switch (slot->type()) {
- case Slot::LOCAL:
- case Slot::PARAMETER: {
- MemOperand target = MemOperand(fp, SlotOffset(slot));
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Perform assignment and discard value.
- __ pop(r0);
- __ str(r0, target);
- break;
- case Expression::kValue:
- // Perform assignment and preserve value.
- __ ldr(r0, MemOperand(sp));
- __ str(r0, target);
- break;
- case Expression::kTest:
- // Perform assignment and test (and discard) value.
- __ pop(r0);
- __ str(r0, target);
- TestAndBranch(r0, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ ldr(r0, MemOperand(sp));
- __ str(r0, target);
- TestAndBranch(r0, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ ldr(r0, MemOperand(sp));
- __ str(r0, target);
- TestAndBranch(r0, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
- }
- break;
- }
-
- case Slot::CONTEXT: {
- MemOperand target = EmitSlotSearch(slot, r1);
- __ pop(r0);
- __ str(r0, target);
-
- // RecordWrite may destroy all its register arguments.
- if (context == Expression::kValue) {
- __ push(r0);
- } else if (context != Expression::kEffect) {
- __ mov(r3, r0);
- }
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-
- // Update the write barrier for the array store with r0 as the scratch
- // register. Skip the write barrier if the value written (r1) is a smi.
- // The smi test is part of RecordWrite on other platforms, not on arm.
- Label exit;
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &exit);
-
- __ mov(r2, Operand(offset));
- __ RecordWrite(r1, r2, r0);
- __ bind(&exit);
- if (context != Expression::kEffect && context != Expression::kValue) {
- Apply(context, r3);
- }
- break;
- }
-
- case Slot::LOOKUP:
- UNREACHABLE();
- break;
- }
- } else {
- // Variables rewritten as properties are not treated as variables in
- // assignments.
- UNREACHABLE();
- }
-}
-
-
-void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a named store IC.
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
-
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value.
- __ push(ip);
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- __ pop(r0);
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(r0); // Result of assignment, saved even if not needed.
- __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value.
- __ push(ip);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(r0);
- }
-
- DropAndApply(1, expr->context(), r0);
-}
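
The ToSlowProperties/ToFastProperties bracketing exists because adding properties one at a time to a fast-mode object can copy its property metadata on every addition, which is quadratic over an initialization block, while dictionary mode pays roughly constant cost per insertion. A toy cost model (illustrative arithmetic only, not V8 measurements):

    // Total work for n one-at-a-time property additions.
    int FastModeCost(int n) {        // each add copies everything so far:
      return n * (n + 1) / 2;        // 1 + 2 + ... + n, i.e. O(n^2)
    }

    int DictionaryModeCost(int n) {  // one hash-table insert per property
      return n;                      // O(n)
    }
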
-
-
-void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a keyed store IC.
-
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- // Receiver is under the key and value.
- __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
- __ push(ip);
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- __ pop(r0);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(r0); // Result of assignment, saved even if not needed.
-    // Receiver is under the key and value.
- __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
- __ push(ip);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(r0);
- }
-
- // Receiver and key are still on stack.
- DropAndApply(2, expr->context(), r0);
-}
-
-
-void FastCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- Expression* key = expr->key();
-
- // Record the source position for the property load.
- SetSourcePosition(expr->position());
-
- // Evaluate receiver.
- Visit(expr->obj());
-
- if (key->IsPropertyName()) {
- // Do a named property load. The IC expects the property name in r2 and
- // the receiver on the stack.
- __ mov(r2, Operand(key->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- DropAndApply(1, expr->context(), r0);
- } else {
- // Do a keyed property load.
- Visit(expr->key());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // Drop key and receiver left on the stack by IC.
- DropAndApply(2, expr->context(), r0);
- }
-}
-
-void FastCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> ignored,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
- NOT_IN_LOOP);
- __ Call(ic, mode);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- DropAndApply(1, expr->context(), r0);
-}
-
-
-void FastCodeGenerator::EmitCallWithStub(Call* expr) {
- // Code common for calls using the call stub.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, NOT_IN_LOOP);
- __ CallStub(&stub);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- DropAndApply(1, expr->context(), r0);
-}
-
-
-void FastCodeGenerator::VisitCall(Call* expr) {
- Comment cmnt(masm_, "[ Call");
- Expression* fun = expr->expression();
- Variable* var = fun->AsVariableProxy()->AsVariable();
-
- if (var != NULL && var->is_possibly_eval()) {
- // Call to the identifier 'eval'.
- UNREACHABLE();
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // Call to a global variable.
- __ mov(r1, Operand(var->name()));
- // Push global object as receiver for the call IC lookup.
- __ ldr(r0, CodeGenerator::GlobalObject());
- __ stm(db_w, sp, r1.bit() | r0.bit());
- EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
- // Call to a lookup slot.
- UNREACHABLE();
- } else if (fun->AsProperty() != NULL) {
- // Call to an object property.
- Property* prop = fun->AsProperty();
- Literal* key = prop->key()->AsLiteral();
- if (key != NULL && key->handle()->IsSymbol()) {
- // Call to a named property, use call IC.
- __ mov(r0, Operand(key->handle()));
- __ push(r0);
- Visit(prop->obj());
- EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
- } else {
- // Call to a keyed property, use keyed load IC followed by function
- // call.
- Visit(prop->obj());
- Visit(prop->key());
- // Record source code position for IC call.
- SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // Load receiver object into r1.
- if (prop->is_synthetic()) {
- __ ldr(r1, CodeGenerator::GlobalObject());
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- } else {
- __ ldr(r1, MemOperand(sp, kPointerSize));
- }
- // Overwrite (object, key) with (function, receiver).
- __ str(r0, MemOperand(sp, kPointerSize));
- __ str(r1, MemOperand(sp));
- EmitCallWithStub(expr);
- }
- } else {
- // Call to some other expression. If the expression is an anonymous
- // function literal not called in a loop, mark it as one that should
- // also use the fast code generator.
- FunctionLiteral* lit = fun->AsFunctionLiteral();
- if (lit != NULL &&
- lit->name()->Equals(Heap::empty_string()) &&
- loop_depth() == 0) {
- lit->set_try_fast_codegen(true);
- }
- Visit(fun);
- // Load global receiver object.
- __ ldr(r1, CodeGenerator::GlobalObject());
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ push(r1);
- // Emit function call.
- EmitCallWithStub(expr);
- }
-}
-
-
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
- // Push function on the stack.
- Visit(expr->expression());
- ASSERT_EQ(Expression::kValue, expr->expression()->context());
-
- // Push global object (receiver).
- __ ldr(r0, CodeGenerator::GlobalObject());
- __ push(r0);
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
- // If location is value, it is already on the stack,
- // so nothing to do here.
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetSourcePosition(expr->position());
-
- // Load function, arg_count into r1 and r0.
- __ mov(r0, Operand(arg_count));
- // Function is in sp[arg_count + 1].
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-
- Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
- __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-
- // Replace function on TOS with result in r0, or pop it.
- DropAndApply(1, expr->context(), r0);
-}
-
-
-void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Comment cmnt(masm_, "[ CallRuntime");
- ZoneList<Expression*>* args = expr->arguments();
-
- if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ mov(r1, Operand(expr->name()));
- __ ldr(r0, CodeGenerator::GlobalObject());
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset));
- __ stm(db_w, sp, r1.bit() | r0.bit());
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
- }
-
- if (expr->is_jsruntime()) {
- // Call the JS runtime function.
- Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
- NOT_IN_LOOP);
- __ Call(ic, RelocInfo::CODE_TARGET);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- DropAndApply(1, expr->context(), r0);
- } else {
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
- Apply(expr->context(), r0);
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ if (has_this_properties()) {
+ // Restore this.
+ EmitLoadReceiver(r1);
}
}
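
Behind the contextual load IC is, in effect, a name lookup on the global object that the IC caches by patching its call site. A toy stand-in for the semantics (a plain hash map in place of the global object's backing store; it models the hit/miss split only, not the self-patching):

    #include <string>
    #include <unordered_map>

    struct GlobalObject {
      std::unordered_map<std::string, int> properties;

      // A miss corresponds to the IC's slow path: a full runtime lookup
      // followed by patching the call site for next time.
      bool Load(const std::string& name, int* out) const {
        auto it = properties.find(name);
        if (it == properties.end()) return false;
        *out = it->second;
        return true;
      }
    };
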
-void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- Visit(expr->expression());
- ASSERT_EQ(Expression::kEffect, expr->expression()->context());
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- break;
- case Expression::kValue:
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ push(ip);
- break;
- case Expression::kTestValue:
-        // The value (undefined) is false, so it is needed on the false path.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ push(ip);
- case Expression::kTest:
- case Expression::kValueTest:
- __ jmp(false_label_);
- break;
- }
- break;
- }
-
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- ASSERT_EQ(Expression::kTest, expr->expression()->context());
-
- Label push_true, push_false, done;
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
-
- case Expression::kEffect:
- VisitForControl(expr->expression(), &done, &done);
- __ bind(&done);
- break;
-
- case Expression::kValue:
- VisitForControl(expr->expression(), &push_false, &push_true);
- __ bind(&push_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- __ jmp(&done);
- __ bind(&push_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- __ bind(&done);
- break;
-
- case Expression::kTest:
- VisitForControl(expr->expression(), false_label_, true_label_);
- break;
-
- case Expression::kValueTest:
- VisitForControl(expr->expression(), false_label_, &push_true);
- __ bind(&push_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- __ jmp(true_label_);
- break;
-
- case Expression::kTestValue:
- VisitForControl(expr->expression(), &push_false, true_label_);
- __ bind(&push_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- __ jmp(false_label_);
- break;
- }
- break;
- }
-
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- ASSERT_EQ(Expression::kValue, expr->expression()->context());
+void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+ LookupResult lookup;
+ receiver()->Lookup(*name, &lookup);
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy != NULL &&
- !proxy->var()->is_this() &&
- proxy->var()->is_global()) {
- Comment cmnt(masm_, "Global variable");
- __ ldr(r0, CodeGenerator::GlobalObject());
- __ push(r0);
- __ mov(r2, Operand(proxy->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ str(r0, MemOperand(sp));
- } else if (proxy != NULL &&
- proxy->var()->slot() != NULL &&
- proxy->var()->slot()->type() == Slot::LOOKUP) {
- __ mov(r0, Operand(proxy->name()));
- __ stm(db_w, sp, cp.bit() | r0.bit());
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- __ push(r0);
- } else {
- // This expression cannot throw a reference error at the top level.
- Visit(expr->expression());
- }
-
- __ CallRuntime(Runtime::kTypeof, 1);
- Apply(expr->context(), r0);
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
- Comment cmnt(masm_, "[ CountOperation");
-
- // Expression can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->expression()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type = (prop->key()->context() == Expression::kUninitialized)
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
+ ASSERT(lookup.holder() == *receiver());
+ ASSERT(lookup.type() == FIELD);
+ Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+ int index = lookup.GetFieldIndex() - map->inobject_properties();
+ int offset = index * kPointerSize;
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
- Expression::kValue);
+  // Negative offsets indicate in-object properties.
+ if (offset < 0) {
+ offset += map->instance_size();
+ __ mov(r2, r1); // Copy receiver for write barrier.
} else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && expr->context() != Expression::kEffect) {
- ASSERT(expr->context() != Expression::kUninitialized);
- __ mov(ip, Operand(Smi::FromInt(0)));
- __ push(ip);
- }
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- if (assign_type == NAMED_PROPERTY) {
- EmitNamedPropertyLoad(prop, Expression::kValue);
- } else {
- Visit(prop->key());
- ASSERT_EQ(Expression::kValue, prop->key()->context());
- EmitKeyedPropertyLoad(prop, Expression::kValue);
- }
- }
-
- // Convert to number.
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Do not save result.
- break;
- case Expression::kValue:
- case Expression::kTest:
- case Expression::kTestValue:
- case Expression::kValueTest:
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(r0);
- break;
- case NAMED_PROPERTY:
- __ str(r0, MemOperand(sp, kPointerSize));
- break;
- case KEYED_PROPERTY:
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
- break;
- }
- break;
- }
- }
-
- // Call runtime for +1/-1.
- if (expr->op() == Token::INC) {
- __ mov(ip, Operand(Smi::FromInt(1)));
- } else {
- __ mov(ip, Operand(Smi::FromInt(-1)));
- }
- __ stm(db_w, sp, ip.bit() | r0.bit());
- __ CallRuntime(Runtime::kNumberAdd, 2);
-
- // Store the value returned in r0.
- switch (assign_type) {
- case VARIABLE:
- __ push(r0);
- if (expr->is_postfix()) {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Expression::kEffect);
- // For all contexts except kEffect: We have the result on
- // top of the stack.
- if (expr->context() != Expression::kEffect) {
- ApplyTOS(expr->context());
- }
- } else {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- expr->context());
- }
- break;
- case NAMED_PROPERTY: {
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- if (expr->is_postfix()) {
- __ Drop(1); // Result is on the stack under the receiver.
- if (expr->context() != Expression::kEffect) {
- ApplyTOS(expr->context());
- }
- } else {
- DropAndApply(1, expr->context(), r0);
- }
- break;
- }
- case KEYED_PROPERTY: {
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- if (expr->is_postfix()) {
- __ Drop(2); // Result is on the stack under the key and the receiver.
- if (expr->context() != Expression::kEffect) {
- ApplyTOS(expr->context());
- }
- } else {
- DropAndApply(2, expr->context(), r0);
- }
- break;
- }
- }
-}
-
-
-void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- Comment cmnt(masm_, "[ BinaryOperation");
- switch (expr->op()) {
- case Token::COMMA:
- ASSERT_EQ(Expression::kEffect, expr->left()->context());
- ASSERT_EQ(expr->context(), expr->right()->context());
- Visit(expr->left());
- Visit(expr->right());
- break;
-
- case Token::OR:
- case Token::AND:
- EmitLogicalOperation(expr);
- break;
-
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD:
- case Token::MUL:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- ASSERT_EQ(Expression::kValue, expr->left()->context());
- ASSERT_EQ(Expression::kValue, expr->right()->context());
-
- Visit(expr->left());
- Visit(expr->right());
- __ pop(r0);
- __ pop(r1);
- GenericBinaryOpStub stub(expr->op(),
- NO_OVERWRITE);
- __ CallStub(&stub);
- Apply(expr->context(), r0);
-
- break;
- }
- default:
- UNREACHABLE();
+ offset += FixedArray::kHeaderSize;
+ __ ldr(r2, FieldMemOperand(r1, JSObject::kPropertiesOffset));
}
+ // Perform the store.
+ __ str(r0, FieldMemOperand(r2, offset));
+ __ mov(r3, Operand(offset));
+ __ RecordWrite(r2, r3, ip);
}
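
The offset arithmetic in EmitThisPropertyStore encodes where a FIELD property lives: indices below the map's in-object count come out negative and are rebased against the instance size (the field sits inside the object), while the rest index the external properties array. A worked example in plain C++ (the field counts and sizes are hypothetical):

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;
      const int kFixedArrayHeaderSize = 8;    // illustrative header size
      int inobject_properties = 4;            // map->inobject_properties()
      int instance_size = 6 * kPointerSize;   // map->instance_size()

      for (int field = 0; field < 6; ++field) {
        int offset = (field - inobject_properties) * kPointerSize;
        if (offset < 0) {
          offset += instance_size;  // in-object: rebase from instance end
          std::printf("field %d: in-object, offset %d\n", field, offset);
        } else {
          offset += kFixedArrayHeaderSize;  // out-of-object backing store
          std::printf("field %d: properties array, offset %d\n", field, offset);
        }
      }
      return 0;
    }
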
-void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
- ASSERT_EQ(Expression::kValue, expr->left()->context());
- ASSERT_EQ(Expression::kValue, expr->right()->context());
- Visit(expr->left());
- Visit(expr->right());
-
- // Always perform the comparison for its control flow. Pack the result
- // into the expression's context after the comparison is performed.
- Label push_true, push_false, done;
- // Initially assume we are in a test context.
- Label* if_true = true_label_;
- Label* if_false = false_label_;
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- if_true = &done;
- if_false = &done;
- break;
- case Expression::kValue:
- if_true = &push_true;
- if_false = &push_false;
- break;
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- if_true = &push_true;
- break;
- case Expression::kTestValue:
- if_false = &push_false;
- break;
- }
-
- switch (expr->op()) {
- case Token::IN: {
- __ InvokeBuiltin(Builtins::IN, CALL_JS);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, if_true);
- __ jmp(if_false);
- break;
- }
-
- case Token::INSTANCEOF: {
- InstanceofStub stub;
- __ CallStub(&stub);
- __ tst(r0, r0);
- __ b(eq, if_true); // The stub returns 0 for true.
- __ jmp(if_false);
- break;
- }
-
- default: {
- Condition cc = eq;
- bool strict = false;
- switch (expr->op()) {
- case Token::EQ_STRICT:
- strict = true;
- // Fall through
- case Token::EQ:
- cc = eq;
- __ pop(r0);
- __ pop(r1);
- break;
- case Token::LT:
- cc = lt;
- __ pop(r0);
- __ pop(r1);
- break;
- case Token::GT:
-        // Reverse left and right sides to obtain ECMA-262 conversion order.
- cc = lt;
- __ pop(r1);
- __ pop(r0);
- break;
- case Token::LTE:
-        // Reverse left and right sides to obtain ECMA-262 conversion order.
- cc = ge;
- __ pop(r1);
- __ pop(r0);
- break;
- case Token::GTE:
- cc = ge;
- __ pop(r0);
- __ pop(r1);
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
-
- // The comparison stub expects the smi vs. smi case to be handled
- // before it is called.
- Label slow_case;
- __ orr(r2, r0, Operand(r1));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &slow_case);
- __ cmp(r1, r0);
- __ b(cc, if_true);
- __ jmp(if_false);
-
- __ bind(&slow_case);
- CompareStub stub(cc, strict);
- __ CallStub(&stub);
- __ tst(r0, r0);
- __ b(cc, if_true);
- __ jmp(if_false);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
-
- case Expression::kEffect:
- __ bind(&done);
- break;
-
- case Expression::kValue:
- __ bind(&push_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- __ jmp(&done);
- __ bind(&push_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- __ bind(&done);
- break;
-
- case Expression::kTest:
- break;
-
- case Expression::kValueTest:
- __ bind(&push_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- __ jmp(true_label_);
- break;
-
- case Expression::kTestValue:
- __ bind(&push_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- __ jmp(false_label_);
- break;
- }
-}
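
The smi fast path above leans on the tagging scheme (kSmiTag is 0, as the assertions elsewhere in this file rely on): OR-ing two tagged words and testing the tag bit detects "both smis" in two instructions, so smi-smi comparisons skip the CompareStub entirely. In plain C++:

    #include <cstdint>

    const intptr_t kSmiTagMask = 1;  // low bit clear => smi; set => heap object

    bool BothSmis(intptr_t a, intptr_t b) {
      // orr r2, r0, r1 ; tst r2, #kSmiTagMask in the generated code.
      return ((a | b) & kSmiTagMask) == 0;
    }
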
-
-
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- Apply(expr->context(), r0);
-}
-
-
-Register FastCodeGenerator::result_register() { return r0; }
-
-
-Register FastCodeGenerator::context_register() { return cp; }
-
-
-void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
- __ str(value, MemOperand(fp, frame_offset));
-}
-
+void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
+ ASSERT(function_ == NULL);
+ ASSERT(info_ == NULL);
+ function_ = fun;
+ info_ = info;
-void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ ldr(dst, CodeGenerator::ContextOperand(cp, context_index));
-}
+ // Save the caller's frame pointer and set up our own.
+ Comment prologue_cmnt(masm(), ";; Prologue");
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ // Note that we keep a live register reference to cp (context) at
+ // this point.
+ // Receiver (this) is allocated to r1 if there are this properties.
+ if (has_this_properties()) EmitReceiverMapCheck();
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
+ VisitStatements(fun->body());
-void FastCodeGenerator::EnterFinallyBlock() {
- ASSERT(!result_register().is(r1));
- // Store result register while executing finally block.
- __ push(result_register());
- // Cook return address in link register to stack (smi encoded Code* delta)
- __ sub(r1, lr, Operand(masm_->CodeObject()));
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- ASSERT_EQ(0, kSmiTag);
- __ add(r1, r1, Operand(r1)); // Convert to smi.
- __ push(r1);
-}
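
The "cooking" above (and the matching uncooking in ExitFinallyBlock below) stores the return address as a smi-tagged delta from the code object's start, so a moving collector can relocate the code while a finally block is live without invalidating the saved address. Tagging is a doubling, untagging an arithmetic shift. In plain C++, with addresses as plain integers:

    #include <cstdint>

    intptr_t Cook(intptr_t lr, intptr_t code_object_start) {
      intptr_t delta = lr - code_object_start;
      return delta + delta;  // smi-tag: delta << 1, tag bit 0
    }

    intptr_t Uncook(intptr_t cooked, intptr_t code_object_start) {
      return (cooked >> 1) + code_object_start;  // untag, then rebase
    }
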
+ Comment return_cmnt(masm(), ";; Return(<undefined>)");
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ Comment epilogue_cmnt(masm(), ";; Epilogue");
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ int32_t sp_delta = (fun->scope()->num_parameters() + 1) * kPointerSize;
+ __ add(sp, sp, Operand(sp_delta));
+ __ Jump(lr);
-void FastCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(r1));
- // Restore result register from stack.
- __ pop(r1);
- // Uncook return address and return.
- __ pop(result_register());
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- __ mov(r1, Operand(r1, ASR, 1)); // Un-smi-tag value.
- __ add(pc, r1, Operand(masm_->CodeObject()));
+ __ bind(&bailout_);
}
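
For reference, the prologue above (stm of r1, cp, fp, lr, then add fp, sp, #8) leaves the standard JS frame behind it. Restating the fp-relative slots as plain C++ constants (assuming 4-byte pointers; the names are illustrative, chosen to echo the frame constants used throughout this file):

    enum FrameLayout {
      kReturnAddressOffset = 4,   // [fp + 4]: saved lr
      kCallerFpOffset      = 0,   // [fp + 0]: caller's frame pointer
      kFrameContextOffset  = -4,  // [fp - 4]: cp, the current context
      kFrameFunctionOffset = -8   // [fp - 8]: r1, the JSFunction being run
    };
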
#undef __
+
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
new file mode 100644
index 000000000..9f240dd82
--- /dev/null
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -0,0 +1,1781 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "compiler.h"
+#include "debug.h"
+#include "full-codegen.h"
+#include "parser.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right. The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+// o r1: the JS function object being called (i.e., ourselves)
+// o cp: our context
+// o fp: our caller's frame pointer
+// o sp: stack pointer
+// o lr: return address
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-arm.h for its layout.
+void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
+ function_ = fun;
+ SetFunctionPosition(fun);
+
+ if (mode == PRIMARY) {
+ int locals_count = fun->scope()->num_stack_slots();
+
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ if (locals_count > 0) {
+ // Load undefined value here, so the value is ready for the loop
+ // below.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ }
+ // Adjust fp to point to caller's fp.
+ __ add(fp, sp, Operand(2 * kPointerSize));
+
+ { Comment cmnt(masm_, "[ Allocate locals");
+ for (int i = 0; i < locals_count; i++) {
+ __ push(ip);
+ }
+ }
+
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ if (fun->scope()->num_heap_slots() > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is in r1.
+ __ push(r1);
+ __ CallRuntime(Runtime::kNewContext, 1);
+ function_in_register = false;
+ // Context is returned in both r0 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = fun->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = fun->scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ ldr(r0, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ __ mov(r1, Operand(Context::SlotOffset(slot->index())));
+ __ str(r0, MemOperand(cp, r1));
+ // Update the write barrier. This clobbers all involved
+          // registers, so we have to use a third register to avoid
+ // clobbering cp.
+ __ mov(r2, Operand(cp));
+ __ RecordWrite(r2, r1, r0);
+ }
+ }
+ }
+
+ Variable* arguments = fun->scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register) {
+ // Load this again, if it's used by the local context below.
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ mov(r3, r1);
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
+ fun->num_parameters() * kPointerSize));
+ __ mov(r1, Operand(Smi::FromInt(fun->num_parameters())));
+ __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+      // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ // Duplicate the value; move-to-slot operation might clobber registers.
+ __ mov(r3, r0);
+ Move(arguments->slot(), r0, r1, r2);
+ Slot* dot_arguments_slot =
+ fun->scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, r3, r1, r2);
+ }
+ }
+
+ // Check the stack for overflow or break request.
+ // Put the lr setup instruction in the delay slot. The kInstrSize is
+ // added to the implicit 8 byte offset that always applies to operations
+ // with pc and gives a return address 12 bytes down.
+ { Comment cmnt(masm_, "[ Stack check");
+ __ LoadRoot(r2, Heap::kStackLimitRootIndex);
+ __ add(lr, pc, Operand(Assembler::kInstrSize));
+ __ cmp(sp, Operand(r2));
+ StackCheckStub stub;
+ __ mov(pc,
+ Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ LeaveCC,
+ lo);
+ }
+
+ { Comment cmnt(masm_, "[ Declarations");
+ VisitDeclarations(fun->scope()->declarations());
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(fun->body());
+ ASSERT(loop_depth() == 0);
+ }
+
+ { Comment cmnt(masm_, "[ return <undefined>;");
+ // Emit a 'return undefined' in case control fell off the end of the
+ // body.
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ }
+ EmitReturnSequence(function_->end_position());
+}
+
+
+void FullCodeGenerator::EmitReturnSequence(int position) {
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ b(&return_label_);
+ } else {
+ __ bind(&return_label_);
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in r0.
+ __ push(r0);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+
+ // Calculate the exact length of the return sequence and make sure that
+ // the constant pool is not emitted inside of the return sequence.
+ int num_parameters = function_->scope()->num_parameters();
+ int32_t sp_delta = (num_parameters + 1) * kPointerSize;
+ int return_sequence_length = Assembler::kJSReturnSequenceLength;
+ if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
+ // Additional mov instruction generated.
+ return_sequence_length++;
+ }
+ masm_->BlockConstPoolFor(return_sequence_length);
+
+ CodeGenerator::RecordPositions(masm_, position);
+ __ RecordJSReturn();
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ __ add(sp, sp, Operand(sp_delta));
+ __ Jump(lr);
+
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger. The add instruction above is an addressing
+ // mode 1 instruction where there are restrictions on which immediate values
+ // can be encoded in the instruction and which immediate values require
+ // the use of an additional instruction to move the immediate to a
+ // temporary register.
+ ASSERT_EQ(return_sequence_length,
+ masm_->InstructionsGeneratedSince(&check_exit_codesize));
+ }
+}
+
+
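+// An Expression::Context says what the generated code should do with an
+// expression's result: kEffect discards it, kValue materializes it in the
+// accumulator (r0) or on the stack depending on location_, kTest turns it
+// into a branch to true_label_/false_label_, and kValueTest/kTestValue in
+// addition keep the value live on the true or false branch, respectively.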
+void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ // Nothing to do.
+ break;
+
+ case Expression::kValue:
+ // Move value into place.
+ switch (location_) {
+ case kAccumulator:
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ break;
+ case kStack:
+ __ push(reg);
+ break;
+ }
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // Push an extra copy of the value in case it's needed.
+ __ push(reg);
+ // Fall through.
+
+ case Expression::kTest:
+ // We always call the runtime on ARM, so push the value as argument.
+ __ push(reg);
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Nothing to do.
+ break;
+ case Expression::kValue:
+ case Expression::kTest:
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // On ARM we have to move the value into a register to do anything
+ // with it.
+ Move(result_register(), slot);
+ Apply(context, result_register());
+ break;
+ }
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Nothing to do.
+ break;
+ case Expression::kValue:
+ case Expression::kTest:
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // On ARM we have to move the value into a register to do anything
+ // with it.
+ __ mov(result_register(), Operand(lit->handle()));
+ Apply(context, result_register());
+ break;
+ }
+}
+
+
+void FullCodeGenerator::ApplyTOS(Expression::Context context) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ __ Drop(1);
+ break;
+
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ pop(result_register());
+ break;
+ case kStack:
+ break;
+ }
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // Duplicate the value on the stack in case it's needed.
+ __ ldr(ip, MemOperand(sp));
+ __ push(ip);
+ // Fall through.
+
+ case Expression::kTest:
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::DropAndApply(int count,
+ Expression::Context context,
+ Register reg) {
+ ASSERT(count > 0);
+ ASSERT(!reg.is(sp));
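+ // Drop count stack entries and then apply reg in the given context;
+ // e.g. an IC that leaves receiver and key behind is followed by
+ // DropAndApply(2, context, r0).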
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ __ Drop(count);
+ break;
+
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ Drop(count);
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ break;
+ case kStack:
+ if (count > 1) __ Drop(count - 1);
+ __ str(reg, MemOperand(sp));
+ break;
+ }
+ break;
+
+ case Expression::kTest:
+ if (count > 1) __ Drop(count - 1);
+ __ str(reg, MemOperand(sp));
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ if (count == 1) {
+ __ str(reg, MemOperand(sp));
+ __ push(reg);
+ } else { // count > 1
+ __ Drop(count - 2);
+ __ str(reg, MemOperand(sp, kPointerSize));
+ __ str(reg, MemOperand(sp));
+ }
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context,
+ Label* materialize_true,
+ Label* materialize_false) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ ASSERT_EQ(materialize_true, materialize_false);
+ __ bind(materialize_true);
+ break;
+
+ case Expression::kValue: {
+ Label done;
+ __ bind(materialize_true);
+ __ mov(result_register(), Operand(Factory::true_value()));
+ __ jmp(&done);
+ __ bind(materialize_false);
+ __ mov(result_register(), Operand(Factory::false_value()));
+ __ bind(&done);
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ break;
+ }
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ __ bind(materialize_true);
+ __ mov(result_register(), Operand(Factory::true_value()));
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ __ jmp(true_label_);
+ break;
+
+ case Expression::kTestValue:
+ __ bind(materialize_false);
+ __ mov(result_register(), Operand(Factory::false_value()));
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ __ jmp(false_label_);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::DoTest(Expression::Context context) {
+ // The value to test is pushed on the stack, and duplicated on the stack
+ // if necessary (for value/test and test/value contexts).
+ ASSERT_NE(NULL, true_label_);
+ ASSERT_NE(NULL, false_label_);
+
+ // Call the runtime to find the boolean value of the source and then
+ // translate it into control flow to the pair of labels.
+ __ CallRuntime(Runtime::kToBool, 1);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r0, ip);
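+ // r0 now holds the true or the false oddball; eq means the tested
+ // value was truthy.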
+
+ // Complete based on the context.
+ switch (context) {
+ case Expression::kUninitialized:
+ case Expression::kEffect:
+ case Expression::kValue:
+ UNREACHABLE();
+
+ case Expression::kTest:
+ __ b(eq, true_label_);
+ __ jmp(false_label_);
+ break;
+
+ case Expression::kValueTest: {
+ Label discard;
+ switch (location_) {
+ case kAccumulator:
+ __ b(ne, &discard);
+ __ pop(result_register());
+ __ jmp(true_label_);
+ break;
+ case kStack:
+ __ b(eq, true_label_);
+ break;
+ }
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(false_label_);
+ break;
+ }
+
+ case Expression::kTestValue: {
+ Label discard;
+ switch (location_) {
+ case kAccumulator:
+ __ b(eq, &discard);
+ __ pop(result_register());
+ __ jmp(false_label_);
+ break;
+ case kStack:
+ __ b(ne, false_label_);
+ break;
+ }
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(true_label_);
+ break;
+ }
+ }
+}
+
+
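+// Return a MemOperand through which the slot can be addressed. Parameters
+// and locals live at fixed fp offsets; context slots live in a heap
+// allocated context reached by walking the chain of enclosing contexts,
+// clobbering scratch in the process.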
+MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return MemOperand(fp, SlotOffset(slot));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(slot->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return CodeGenerator::ContextOperand(scratch, slot->index());
+ }
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return MemOperand(r0, 0);
+}
+
+
+void FullCodeGenerator::Move(Register destination, Slot* source) {
+ // Use destination as scratch.
+ MemOperand slot_operand = EmitSlotSearch(source, destination);
+ __ ldr(destination, slot_operand);
+}
+
+
+void FullCodeGenerator::Move(Slot* dst,
+ Register src,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
+ ASSERT(!scratch1.is(src) && !scratch2.is(src));
+ MemOperand location = EmitSlotSearch(dst, scratch1);
+ __ str(src, location);
+ // Emit the write barrier code if the location is in the heap.
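+ // (Stack slots need no barrier: the GC scans the stack directly.)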
+ if (dst->type() == Slot::CONTEXT) {
+ __ mov(scratch2, Operand(Context::SlotOffset(dst->index())));
+ __ RecordWrite(scratch1, scratch2, src);
+ }
+}
+
+
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+ Comment cmnt(masm_, "[ Declaration");
+ Variable* var = decl->proxy()->var();
+ ASSERT(var != NULL); // Must have been resolved.
+ Slot* slot = var->slot();
+ Property* prop = var->AsProperty();
+
+ if (slot != NULL) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ str(ip, MemOperand(fp, SlotOffset(slot)));
+ } else if (decl->fun() != NULL) {
+ VisitForValue(decl->fun(), kAccumulator);
+ __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
+ }
+ break;
+
+ case Slot::CONTEXT:
+ // We bypass the general EmitSlotSearch because we know more about
+ // this specific context.
+
+ // The variable in the decl always resides in the current context.
+ ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+ if (FLAG_debug_code) {
+ // Check if we have the correct context pointer.
+ __ ldr(r1,
+ CodeGenerator::ContextOperand(cp, Context::FCONTEXT_INDEX));
+ __ cmp(r1, cp);
+ __ Check(eq, "Unexpected declaration in current context.");
+ }
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ str(ip, CodeGenerator::ContextOperand(cp, slot->index()));
+ // No write barrier since the_hole_value is in old space.
+ } else if (decl->fun() != NULL) {
+ VisitForValue(decl->fun(), kAccumulator);
+ __ str(result_register(),
+ CodeGenerator::ContextOperand(cp, slot->index()));
+ int offset = Context::SlotOffset(slot->index());
+ __ mov(r2, Operand(offset));
+ // We know that we have written a function, which is not a smi.
+ __ mov(r1, Operand(cp));
+ __ RecordWrite(r1, r2, result_register());
+ }
+ break;
+
+ case Slot::LOOKUP: {
+ __ mov(r2, Operand(var->name()));
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(decl->mode() == Variable::VAR ||
+ decl->mode() == Variable::CONST);
+ PropertyAttributes attr =
+ (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+ __ mov(r1, Operand(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+ } else if (decl->fun() != NULL) {
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
+ // Push initial value for function declaration.
+ VisitForValue(decl->fun(), kStack);
+ } else {
+ __ mov(r0, Operand(Smi::FromInt(0))); // No initial value!
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+
+ } else if (prop != NULL) {
+ if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+ // We are declaring a function or constant that rewrites to a
+ // property. Use (keyed) IC to set the initial value.
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+
+ if (decl->fun() != NULL) {
+ VisitForValue(decl->fun(), kAccumulator);
+ } else {
+ __ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
+ }
+
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // Value in r0 is ignored (declarations are statements). Receiver
+ // and key on stack are discarded.
+ __ Drop(2);
+ }
+ }
+}
+
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals.
+ // The context is the first argument.
+ __ mov(r1, Operand(pairs));
+ __ mov(r0, Operand(Smi::FromInt(is_eval_ ? 1 : 0)));
+ __ stm(db_w, sp, cp.bit() | r1.bit() | r0.bit());
+ __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+ Comment cmnt(masm_, "[ FunctionLiteral");
+
+ // Build the function boilerplate and instantiate it.
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(expr, script_, this);
+ if (HasStackOverflow()) return;
+
+ ASSERT(boilerplate->IsBoilerplate());
+
+ // Create a new closure.
+ __ mov(r0, Operand(boilerplate));
+ __ stm(db_w, sp, cp.bit() | r0.bit());
+ __ CallRuntime(Runtime::kNewClosure, 2);
+ Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ EmitVariableLoad(expr->var(), context_);
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(Variable* var,
+ Expression::Context context) {
+ // Four cases: non-this global variables, lookup slots, all other
+ // types of slots, and parameters that rewrite to explicit property
+ // accesses on the arguments object.
+ Slot* slot = var->slot();
+ Property* property = var->AsProperty();
+
+ if (var->is_global() && !var->is_this()) {
+ Comment cmnt(masm_, "Global variable");
+ // Use inline caching. Variable name is passed in r2 and the global
+ // object on the stack.
+ __ ldr(ip, CodeGenerator::GlobalObject());
+ __ push(ip);
+ __ mov(r2, Operand(var->name()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ DropAndApply(1, context, r0);
+
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ Comment cmnt(masm_, "Lookup slot");
+ __ mov(r1, Operand(var->name()));
+ __ stm(db_w, sp, cp.bit() | r1.bit()); // Context and name.
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ Apply(context, r0);
+
+ } else if (slot != NULL) {
+ Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
+ ? "Context slot"
+ : "Stack slot");
+ Apply(context, slot);
+
+ } else {
+ Comment cmnt(masm_, "Rewritten parameter");
+ ASSERT_NOT_NULL(property);
+ // Rewritten parameter accesses are of the form "slot[literal]".
+
+ // Assert that the object is in a slot.
+ Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(object_var);
+ Slot* object_slot = object_var->slot();
+ ASSERT_NOT_NULL(object_slot);
+
+ // Load the object.
+ Move(r2, object_slot);
+
+ // Assert that the key is a smi.
+ Literal* key_literal = property->key()->AsLiteral();
+ ASSERT_NOT_NULL(key_literal);
+ ASSERT(key_literal->handle()->IsSmi());
+
+ // Load the key.
+ __ mov(r1, Operand(key_literal->handle()));
+
+ // Push both as arguments to the IC.
+ __ stm(db_w, sp, r2.bit() | r1.bit());
+
+ // Do a keyed property load.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // Drop key and object left on the stack by IC, and push the result.
+ DropAndApply(2, context, r0);
+ }
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ Label done;
+ // Registers will be used as follows:
+ // r4 = JS function, literals array
+ // r3 = literal index
+ // r2 = RegExp pattern
+ // r1 = RegExp flags
+ // r0 = temp + return value (RegExp literal)
+ __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ ldr(r0, FieldMemOperand(r4, literal_offset));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, ip);
+ __ b(ne, &done);
+ __ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
+ __ mov(r2, Operand(expr->pattern()));
+ __ mov(r1, Operand(expr->flags()));
+ __ stm(db_w, sp, r4.bit() | r3.bit() | r2.bit() | r1.bit());
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ bind(&done);
+ Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+ __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
+ __ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
+ __ mov(r0, Operand(expr->constant_properties()));
+ __ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
+ if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
+ } else {
+ __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
+ }
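+ // A literal nested deeper than one level may contain subliterals that
+ // must themselves be created, so it takes the general runtime path;
+ // flat literals are cloned from the boilerplate with the cheaper
+ // shallow call.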
+
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in r0.
+ bool result_saved = false;
+
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(r0); // Save result on stack.
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->handle()->IsSymbol()) {
+ VisitForValue(value, kAccumulator);
+ __ mov(r2, Operand(key->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // StoreIC leaves the receiver on the stack.
+ break;
+ }
+ // Fall through.
+ case ObjectLiteral::Property::PROTOTYPE:
+ // Duplicate receiver on stack.
+ __ ldr(r0, MemOperand(sp));
+ __ push(r0);
+ VisitForValue(key, kStack);
+ VisitForValue(value, kStack);
+ __ CallRuntime(Runtime::kSetProperty, 3);
+ break;
+ case ObjectLiteral::Property::GETTER:
+ case ObjectLiteral::Property::SETTER:
+ // Duplicate receiver on stack.
+ __ ldr(r0, MemOperand(sp));
+ __ push(r0);
+ VisitForValue(key, kStack);
+ __ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
+ Smi::FromInt(1) :
+ Smi::FromInt(0)));
+ __ push(r1);
+ VisitForValue(value, kStack);
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ break;
+ }
+ }
+
+ if (result_saved) {
+ ApplyTOS(context_);
+ } else {
+ Apply(context_, r0);
+ }
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+ __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
+ __ mov(r1, Operand(expr->constant_elements()));
+ __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+ if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
+ } else {
+ __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ }
+
+ bool result_saved = false; // Is the result saved to the stack?
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ ZoneList<Expression*>* subexprs = expr->values();
+ for (int i = 0, len = subexprs->length(); i < len; i++) {
+ Expression* subexpr = subexprs->at(i);
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (subexpr->AsLiteral() != NULL ||
+ CompileTimeValue::IsCompileTimeValue(subexpr)) {
+ continue;
+ }
+
+ if (!result_saved) {
+ __ push(r0);
+ result_saved = true;
+ }
+ VisitForValue(subexpr, kAccumulator);
+
+ // Store the subexpression value in the array's elements.
+ __ ldr(r1, MemOperand(sp)); // Copy of array literal.
+ __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ str(result_register(), FieldMemOperand(r1, offset));
+
+ // Update the write barrier for the array store with r0 as the scratch
+ // register.
+ __ mov(r2, Operand(offset));
+ __ RecordWrite(r1, r2, result_register());
+ }
+
+ if (result_saved) {
+ ApplyTOS(context_);
+ } else {
+ Apply(context_, r0);
+ }
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ __ mov(r2, Operand(key->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+ Expression::Context context) {
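+ // The caller evaluated the left operand to the stack and the right
+ // operand to the accumulator (r0); the stub takes its left argument
+ // in r1 and its right argument in r0.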
+ __ pop(r1);
+ GenericBinaryOpStub stub(op, NO_OVERWRITE);
+ __ CallStub(&stub);
+ Apply(context, r0);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+ Expression::Context context) {
+ // Three main cases: global variables, lookup slots, and all other
+ // types of slots. Left-hand-side parameters that rewrite to
+ // explicit property accesses do not reach here.
+ ASSERT(var != NULL);
+ ASSERT(var->is_global() || var->slot() != NULL);
+
+ Slot* slot = var->slot();
+ if (var->is_global()) {
+ ASSERT(!var->is_this());
+ // Assignment to a global variable. Use inline caching for the
+ // assignment. Right-hand-side value is passed in r0, variable name in
+ // r2, and the global object on the stack.
+ __ mov(r2, Operand(var->name()));
+ __ ldr(ip, CodeGenerator::GlobalObject());
+ __ push(ip);
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Overwrite the global object on the stack with the result if needed.
+ DropAndApply(1, context, r0);
+
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ __ push(result_register()); // Value.
+ __ mov(r1, Operand(var->name()));
+ __ stm(db_w, sp, cp.bit() | r1.bit()); // Context and name.
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ Apply(context, r0);
+
+ } else if (slot != NULL) {
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER:
+ __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
+ break;
+
+ case Slot::CONTEXT: {
+ MemOperand target = EmitSlotSearch(slot, r1);
+ __ str(result_register(), target);
+
+ // RecordWrite may destroy all its register arguments.
+ __ mov(r3, result_register());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+
+ __ mov(r2, Operand(offset));
+ __ RecordWrite(r1, r2, r3);
+ break;
+ }
+
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ break;
+ }
+ Apply(context, result_register());
+
+ } else {
+ // Variables rewritten as properties are not treated as variables in
+ // assignments.
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ push(result_register());
+ __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is now under value.
+ __ push(ip);
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
+ }
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(r0); // Result of assignment, saved even if not needed.
+ __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value.
+ __ push(ip);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(r0);
+ }
+
+ DropAndApply(1, context_, r0);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a keyed store IC.
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ push(result_register());
+ // Receiver is now under the key and value.
+ __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
+ __ push(ip);
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
+ }
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(r0); // Result of assignment, saved even if not needed.
+ // Receiver is under the key and value.
+ __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
+ __ push(ip);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(r0);
+ }
+
+ // Receiver and key are still on stack.
+ DropAndApply(2, context_, r0);
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+
+ // Evaluate receiver.
+ VisitForValue(expr->obj(), kStack);
+
+ if (key->IsPropertyName()) {
+ EmitNamedPropertyLoad(expr);
+ // Drop receiver left on the stack by IC.
+ DropAndApply(1, context_, r0);
+ } else {
+ VisitForValue(expr->key(), kStack);
+ EmitKeyedPropertyLoad(expr);
+ // Drop key and receiver left on the stack by IC.
+ DropAndApply(2, context_, r0);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallWithIC(Call* expr,
+ Handle<Object> ignored,
+ RelocInfo::Mode mode) {
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ // Call the IC initialization code.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
+ __ Call(ic, mode);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndApply(1, context_, r0);
+}
+
+
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+ // Code common for calls using the call stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+ __ CallStub(&stub);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndApply(1, context_, r0);
+}
+
+
+void FullCodeGenerator::VisitCall(Call* expr) {
+ Comment cmnt(masm_, "[ Call");
+ Expression* fun = expr->expression();
+ Variable* var = fun->AsVariableProxy()->AsVariable();
+
+ if (var != NULL && var->is_possibly_eval()) {
+ // Call to the identifier 'eval'.
+ UNREACHABLE();
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // Call to a global variable.
+ __ mov(r1, Operand(var->name()));
+ // Push global object as receiver for the call IC lookup.
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ stm(db_w, sp, r1.bit() | r0.bit());
+ EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ // Call to a lookup slot.
+ UNREACHABLE();
+ } else if (fun->AsProperty() != NULL) {
+ // Call to an object property.
+ Property* prop = fun->AsProperty();
+ Literal* key = prop->key()->AsLiteral();
+ if (key != NULL && key->handle()->IsSymbol()) {
+ // Call to a named property, use call IC.
+ __ mov(r0, Operand(key->handle()));
+ __ push(r0);
+ VisitForValue(prop->obj(), kStack);
+ EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
+ } else {
+ // Call to a keyed property, use keyed load IC followed by function
+ // call.
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Load receiver object into r1.
+ if (prop->is_synthetic()) {
+ __ ldr(r1, CodeGenerator::GlobalObject());
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ } else {
+ __ ldr(r1, MemOperand(sp, kPointerSize));
+ }
+ // Overwrite (object, key) with (function, receiver).
+ __ str(r0, MemOperand(sp, kPointerSize));
+ __ str(r1, MemOperand(sp));
+ EmitCallWithStub(expr);
+ }
+ } else {
+ // Call to some other expression. If the expression is an anonymous
+ // function literal not called in a loop, mark it as one that should
+ // also try the full code generator.
+ FunctionLiteral* lit = fun->AsFunctionLiteral();
+ if (lit != NULL &&
+ lit->name()->Equals(Heap::empty_string()) &&
+ loop_depth() == 0) {
+ lit->set_try_full_codegen(true);
+ }
+ VisitForValue(fun, kStack);
+ // Load global receiver object.
+ __ ldr(r1, CodeGenerator::GlobalObject());
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ __ push(r1);
+ // Emit function call.
+ EmitCallWithStub(expr);
+ }
+}
+
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+ // Push function on the stack.
+ VisitForValue(expr->expression(), kStack);
+
+ // Push global object (receiver).
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ push(r0);
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function, arg_count into r1 and r0.
+ __ mov(r0, Operand(arg_count));
+ // Function is in sp[arg_count + 1].
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+
+ Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+ // Replace function on TOS with result in r0, or pop it.
+ DropAndApply(1, context_, r0);
+}
+
+
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ Comment cmnt(masm_, "[ CallRuntime");
+ ZoneList<Expression*>* args = expr->arguments();
+
+ if (expr->is_jsruntime()) {
+ // Prepare for calling JS runtime function.
+ __ mov(r1, Operand(expr->name()));
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset));
+ __ stm(db_w, sp, r1.bit() | r0.bit());
+ }
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+
+ if (expr->is_jsruntime()) {
+ // Call the JS runtime function.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndApply(1, context_, r0);
+ } else {
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ Apply(context_, r0);
+ }
+}
+
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ VisitForEffect(expr->expression());
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ break;
+ case Expression::kTestValue:
+ // The value (undefined) is false, so it is needed on the false branch.
+ __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ // Fall through.
+ case Expression::kTest:
+ case Expression::kValueTest:
+ __ jmp(false_label_);
+ break;
+ }
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ Label materialize_true, materialize_false, done;
+ // Initially assume a pure test context. Notice that the labels are
+ // swapped.
+ Label* if_true = false_label_;
+ Label* if_false = true_label_;
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ if_true = &done;
+ if_false = &done;
+ break;
+ case Expression::kValue:
+ if_true = &materialize_false;
+ if_false = &materialize_true;
+ break;
+ case Expression::kTest:
+ break;
+ case Expression::kValueTest:
+ if_false = &materialize_true;
+ break;
+ case Expression::kTestValue:
+ if_true = &materialize_false;
+ break;
+ }
+ VisitForControl(expr->expression(), if_true, if_false);
+ Apply(context_, if_false, if_true); // Labels swapped.
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy != NULL &&
+ !proxy->var()->is_this() &&
+ proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ push(r0);
+ __ mov(r2, Operand(proxy->name()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ __ str(r0, MemOperand(sp));
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
+ __ mov(r0, Operand(proxy->name()));
+ __ stm(db_w, sp, cp.bit() | r0.bit());
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ push(r0);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitForValue(expr->expression(), kStack);
+ }
+
+ __ CallRuntime(Runtime::kTypeof, 1);
+ Apply(context_, r0);
+ break;
+ }
+
+ case Token::ADD: {
+ Comment cmnt(masm_, "[ UnaryOperation (ADD)");
+ VisitForValue(expr->expression(), kAccumulator);
+ Label no_conversion;
+ __ tst(result_register(), Operand(kSmiTagMask));
+ __ b(eq, &no_conversion);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
+ __ bind(&no_conversion);
+ Apply(context_, result_register());
+ break;
+ }
+
+ case Token::SUB: {
+ Comment cmnt(masm_, "[ UnaryOperation (SUB)");
+ bool overwrite =
+ (expr->expression()->AsBinaryOperation() != NULL &&
+ expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericUnaryOpStub stub(Token::SUB, overwrite);
+ // GenericUnaryOpStub expects the argument to be in the
+ // accumulator register r0.
+ VisitForValue(expr->expression(), kAccumulator);
+ __ CallStub(&stub);
+ Apply(context_, r0);
+ break;
+ }
+
+ case Token::BIT_NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (BIT_NOT)");
+ bool overwrite =
+ (expr->expression()->AsBinaryOperation() != NULL &&
+ expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ // GenericUnaryOpStub expects the argument to be in the
+ // accumulator register r0.
+ VisitForValue(expr->expression(), kAccumulator);
+ // Avoid calling the stub for Smis.
+ Label smi, done;
+ __ tst(result_register(), Operand(kSmiTagMask));
+ __ b(eq, &smi);
+ // Non-smi: call stub leaving result in accumulator register.
+ __ CallStub(&stub);
+ __ b(&done);
+ // Perform operation directly on Smis.
+ __ bind(&smi);
+ __ mvn(result_register(), Operand(result_register()));
+ // Bit-clear inverted smi-tag.
+ __ bic(result_register(), result_register(), Operand(kSmiTagMask));
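+ // (E.g. the smi 5 is 0b1010; mvn gives ...0101 and clearing the tag
+ // bit yields ...0100, which is the smi encoding of -6 == ~5.)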
+ __ bind(&done);
+ Apply(context_, result_register());
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ Comment cmnt(masm_, "[ CountOperation");
+
+ // The expression can only be a property, a global, or a (parameter or
+ // local) slot. Variables that rewrite to '.arguments' accesses are
+ // treated as KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+ // For a property, inspect the key to distinguish named accesses from
+ // keyed ones.
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ Location saved_location = location_;
+ location_ = kAccumulator;
+ EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
+ Expression::kValue);
+ location_ = saved_location;
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && context_ != Expression::kEffect) {
+ __ mov(ip, Operand(Smi::FromInt(0)));
+ __ push(ip);
+ }
+ VisitForValue(prop->obj(), kStack);
+ if (assign_type == NAMED_PROPERTY) {
+ EmitNamedPropertyLoad(prop);
+ } else {
+ VisitForValue(prop->key(), kStack);
+ EmitKeyedPropertyLoad(prop);
+ }
+ }
+
+ // Call ToNumber only if operand is not a smi.
+ Label no_conversion;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &no_conversion);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
+ __ bind(&no_conversion);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Do not save result.
+ break;
+ case Expression::kValue:
+ case Expression::kTest:
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(r0);
+ break;
+ case NAMED_PROPERTY:
+ __ str(r0, MemOperand(sp, kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ str(r0, MemOperand(sp, 2 * kPointerSize));
+ break;
+ }
+ break;
+ }
+ }
+
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ if (loop_depth() > 0) {
+ __ add(r0, r0, Operand(expr->op() == Token::INC
+ ? Smi::FromInt(1)
+ : Smi::FromInt(-1)));
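+ // Smis occupy the upper 31 bits of the word, so the add overflows
+ // (sets the V flag) exactly when the result leaves the smi range.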
+ __ b(vs, &stub_call);
+ // We could eliminate this smi check if we split the code at
+ // the first smi check before calling ToNumber.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &done);
+ __ bind(&stub_call);
+ // Call stub. Undo the inline operation first by subtracting the same
+ // smi constant that was added; r1 is not set up until below.
+ __ sub(r0, r0, Operand(expr->op() == Token::INC
+ ? Smi::FromInt(1)
+ : Smi::FromInt(-1)));
+ }
+ __ mov(r1, Operand(expr->op() == Token::INC
+ ? Smi::FromInt(1)
+ : Smi::FromInt(-1)));
+ GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+ __ CallStub(&stub);
+ __ bind(&done);
+
+ // Store the value returned in r0.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Expression::kEffect);
+ // For all contexts except kEffect the result is on top of the
+ // stack.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ context_);
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ if (expr->is_postfix()) {
+ __ Drop(1); // Result is on the stack under the receiver.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ DropAndApply(1, context_, r0);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ if (expr->is_postfix()) {
+ __ Drop(2); // Result is on the stack under the key and the receiver.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ DropAndApply(2, context_, r0);
+ }
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+ Comment cmnt(masm_, "[ BinaryOperation");
+ switch (expr->op()) {
+ case Token::COMMA:
+ VisitForEffect(expr->left());
+ Visit(expr->right());
+ break;
+
+ case Token::OR:
+ case Token::AND:
+ EmitLogicalOperation(expr);
+ break;
+
+ case Token::ADD:
+ case Token::SUB:
+ case Token::DIV:
+ case Token::MOD:
+ case Token::MUL:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+ VisitForValue(expr->left(), kStack);
+ VisitForValue(expr->right(), kAccumulator);
+ EmitBinaryOp(expr->op(), context_);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+ Label materialize_true, materialize_false, done;
+ // Initially assume we are in a test context.
+ Label* if_true = true_label_;
+ Label* if_false = false_label_;
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ if_true = &done;
+ if_false = &done;
+ break;
+ case Expression::kValue:
+ if_true = &materialize_true;
+ if_false = &materialize_false;
+ break;
+ case Expression::kTest:
+ break;
+ case Expression::kValueTest:
+ if_true = &materialize_true;
+ break;
+ case Expression::kTestValue:
+ if_false = &materialize_false;
+ break;
+ }
+
+ VisitForValue(expr->left(), kStack);
+ switch (expr->op()) {
+ case Token::IN:
+ VisitForValue(expr->right(), kStack);
+ __ InvokeBuiltin(Builtins::IN, CALL_JS);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r0, ip);
+ __ b(eq, if_true);
+ __ jmp(if_false);
+ break;
+
+ case Token::INSTANCEOF: {
+ VisitForValue(expr->right(), kStack);
+ InstanceofStub stub;
+ __ CallStub(&stub);
+ __ tst(r0, r0);
+ __ b(eq, if_true); // The stub returns 0 for true.
+ __ jmp(if_false);
+ break;
+ }
+
+ default: {
+ VisitForValue(expr->right(), kAccumulator);
+ Condition cc = eq;
+ bool strict = false;
+ switch (expr->op()) {
+ case Token::EQ_STRICT:
+ strict = true;
+ // Fall through
+ case Token::EQ:
+ cc = eq;
+ __ pop(r1);
+ break;
+ case Token::LT:
+ cc = lt;
+ __ pop(r1);
+ break;
+ case Token::GT:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = lt;
+ __ mov(r1, result_register());
+ __ pop(r0);
+ break;
+ case Token::LTE:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = ge;
+ __ mov(r1, result_register());
+ __ pop(r0);
+ break;
+ case Token::GTE:
+ cc = ge;
+ __ pop(r1);
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+
+ // The comparison stub expects the smi vs. smi case to be handled
+ // before it is called.
+ Label slow_case;
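+ // With kSmiTag == 0, the orr of two tagged words has bit 0 clear
+ // only if both operands are smis, so a single tst selects the
+ // fast path.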
+ __ orr(r2, r0, Operand(r1));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &slow_case);
+ __ cmp(r1, r0);
+ __ b(cc, if_true);
+ __ jmp(if_false);
+
+ __ bind(&slow_case);
+ CompareStub stub(cc, strict);
+ __ CallStub(&stub);
+ __ cmp(r0, Operand(0));
+ __ b(cc, if_true);
+ __ jmp(if_false);
+ }
+ }
+
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ Apply(context_, r0);
+}
+
+
+Register FullCodeGenerator::result_register() { return r0; }
+
+
+Register FullCodeGenerator::context_register() { return cp; }
+
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+ __ str(value, MemOperand(fp, frame_offset));
+}
+
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ ldr(dst, CodeGenerator::ContextOperand(cp, context_index));
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FullCodeGenerator::EnterFinallyBlock() {
+ ASSERT(!result_register().is(r1));
+ // Store result register while executing finally block.
+ __ push(result_register());
+ // Cook the return address in lr: push it as a smi-encoded offset from
+ // the code object.
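+ // (The code object may move during a GC; storing the offset rather
+ // than the raw address keeps the value valid across such moves.)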
+ __ sub(r1, lr, Operand(masm_->CodeObject()));
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ ASSERT_EQ(0, kSmiTag);
+ __ add(r1, r1, Operand(r1)); // Convert to smi.
+ __ push(r1);
+}
+
+
+void FullCodeGenerator::ExitFinallyBlock() {
+ ASSERT(!result_register().is(r1));
+ // Restore result register from stack.
+ __ pop(r1);
+ // Uncook return address and return.
+ __ pop(result_register());
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ __ mov(r1, Operand(r1, ASR, 1)); // Un-smi-tag value.
+ __ add(pc, r1, Operand(masm_->CodeObject()));
+}
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index a1f26130a..bae1e9679 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -170,7 +170,6 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
-
Label miss;
__ ldr(r0, MemOperand(sp, 0));
@@ -204,7 +203,6 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
-
Label miss;
// Load receiver.
@@ -318,7 +316,6 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- lr: return address
// -----------------------------------
-
Label miss, global_object, non_global_object;
// Get the receiver of the function from the stack into r1.
@@ -451,7 +448,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
-
Label miss, probe, global;
__ ldr(r0, MemOperand(sp, 0));
@@ -543,6 +539,8 @@ void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
// -- lr : return address
// -- sp[0] : key
// -- sp[4] : receiver
+ // -----------------------------------
+
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r2.bit() | r3.bit());
@@ -555,6 +553,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- lr : return address
// -- sp[0] : key
// -- sp[4] : receiver
+ // -----------------------------------
Label slow, fast;
// Get the key and receiver object from the stack.
@@ -569,11 +568,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Get the map of the receiver.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks.
+
+ // Check bit field.
__ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ tst(r3, Operand(kSlowCaseBitFieldMask));
__ b(ne, &slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
@@ -623,6 +621,8 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- lr : return address
// -- sp[0] : key
// -- sp[4] : receiver
+ // -----------------------------------
+
GenerateGeneric(masm);
}
@@ -641,6 +641,7 @@ void KeyedStoreIC::Generate(MacroAssembler* masm,
// -- lr : return address
// -- sp[0] : key
// -- sp[1] : receiver
+ // -----------------------------------
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
@@ -655,7 +656,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// -- lr : return address
// -- sp[0] : key
// -- sp[1] : receiver
+ // -----------------------------------
Label slow, fast, array, extra, exit;
+
// Get the key and the object from the stack.
__ ldm(ia, sp, r1.bit() | r3.bit()); // r1 = key, r3 = receiver
// Check that the key is a smi.
@@ -807,7 +810,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
// Cache miss: Jump to runtime.
- Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+ GenerateMiss(masm);
}
@@ -828,7 +831,7 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
}
-void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r2 : name
@@ -840,7 +843,7 @@ void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
__ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
// Perform tail call to the entry.
- __ TailCallRuntime(f, 3, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 18cadaca3..b39404e7f 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -205,6 +205,11 @@ void MacroAssembler::LoadRoot(Register destination,
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object, Register offset,
Register scratch) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are cp.
+ ASSERT(!object.is(cp) && !offset.is(cp) && !scratch.is(cp));
+
// This is how much we shift the remembered set bit offset to get the
// offset of the word in the remembered set. We divide by kBitsPerInt (32,
// shift right 5) and then multiply by kIntSize (4, shift left 2).
@@ -272,6 +277,14 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
str(scratch, MemOperand(object));
bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ mov(object, Operand(bit_cast<int32_t>(kZapValue)));
+ mov(offset, Operand(bit_cast<int32_t>(kZapValue)));
+ mov(scratch, Operand(bit_cast<int32_t>(kZapValue)));
+ }
}
@@ -1035,9 +1048,13 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
return;
}
- Runtime::FunctionId function_id =
- static_cast<Runtime::FunctionId>(f->stub_id);
- RuntimeStub stub(function_id, num_arguments);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(r0, Operand(num_arguments));
+ mov(r1, Operand(ExternalReference(f)));
+ CEntryStub stub(1);
CallStub(&stub);
}
@@ -1221,6 +1238,46 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Test that both first and second are sequential ASCII strings.
+ // Assume that they are non-smis.
+ ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+ ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+ int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ and_(scratch1, scratch1, Operand(kFlatAsciiStringMask));
+ and_(scratch2, scratch2, Operand(kFlatAsciiStringMask));
+ cmp(scratch1, Operand(kFlatAsciiStringTag));
+ // Ignore second test if first test failed.
+ cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
+ b(ne, failure);
+}
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Check that neither is a smi.
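+ // With kSmiTag == 0 a smi has bit 0 clear, so the AND of the two
+ // values has bit 0 clear iff at least one of them is a smi.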
+ ASSERT_EQ(0, kSmiTag);
+ and_(scratch1, first, Operand(second));
+ tst(scratch1, Operand(kSmiTagMask));
+ b(eq, failure);
+ JumpIfNonSmisNotBothSequentialAsciiStrings(first,
+ second,
+ scratch1,
+ scratch2,
+ failure);
+}
+
#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 8f2064a74..efc5bfae7 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -337,6 +337,25 @@ class MacroAssembler: public Assembler {
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
+ // ---------------------------------------------------------------------------
+ // String utilities
+
+ // Checks if both objects are sequential ASCII strings and jumps to label
+ // if either is not. Assumes that neither object is a smi.
+ void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label *failure);
+
+ // Checks if both objects are sequential ASCII strings and jumps to label
+ // if either is not.
+ void JumpIfNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* not_flat_ascii_strings);
+
private:
List<Unresolved> unresolved_;
bool generating_stub_;
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index 5ea775104..9dd3b9326 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -63,8 +63,6 @@ namespace internal {
* through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
- * - at_start (if 1, we are starting at the start of the
- * string, otherwise 0)
* - int* capture_array (int[num_saved_registers_], for output).
* --- sp when called ---
* - link address
@@ -76,6 +74,8 @@ namespace internal {
* - void* input_string (location of a handle containing the string)
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
+ * - At start (if 1, we are starting at the start of the
+ * string, otherwise 0)
* - register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
@@ -526,64 +526,54 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
return true;
}
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ eor(r0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(r0, r0, Operand(0x0b));
- __ cmp(r0, Operand(0x0c - 0x0b));
- if (mode_ == ASCII) {
- BranchOrBacktrack(hi, on_no_match);
- } else {
- Label done;
- __ b(ls, &done);
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(r0, r0, Operand(0x2028 - 0x0b));
- __ cmp(r0, Operand(1));
- BranchOrBacktrack(hi, on_no_match);
- __ bind(&done);
- }
- return true;
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ __ eor(r0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+ __ sub(r0, r0, Operand(0x0b));
+ __ cmp(r0, Operand(0x0c - 0x0b));
+ if (mode_ == ASCII) {
+ BranchOrBacktrack(hi, on_no_match);
+ } else {
+ Label done;
+ __ b(ls, &done);
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ sub(r0, r0, Operand(0x2028 - 0x0b));
+ __ cmp(r0, Operand(1));
+ BranchOrBacktrack(hi, on_no_match);
+ __ bind(&done);
}
+ return true;
+ }
case 'w': {
- // Match word character (0-9, A-Z, a-z and _).
- Label digits, done;
- __ cmp(current_character(), Operand('9'));
- __ b(ls, &digits);
- __ cmp(current_character(), Operand('_'));
- __ b(eq, &done);
- __ orr(r0, current_character(), Operand(0x20));
- __ sub(r0, r0, Operand('a'));
- __ cmp(r0, Operand('z' - 'a'));
- BranchOrBacktrack(hi, on_no_match);
- __ jmp(&done);
-
- __ bind(&digits);
- __ cmp(current_character(), Operand('0'));
- BranchOrBacktrack(lo, on_no_match);
- __ bind(&done);
-
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmp(current_character(), Operand('z'));
+ BranchOrBacktrack(hi, on_no_match);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ mov(r0, Operand(map));
+ __ ldrb(r0, MemOperand(r0, current_character()));
+ __ tst(r0, Operand(r0));
+ BranchOrBacktrack(eq, on_no_match);
return true;
}
case 'W': {
- // Match non-word character (not 0-9, A-Z, a-z and _).
- Label digits, done;
- __ cmp(current_character(), Operand('9'));
- __ b(ls, &digits);
- __ cmp(current_character(), Operand('_'));
- BranchOrBacktrack(eq, on_no_match);
- __ orr(r0, current_character(), Operand(0x20));
- __ sub(r0, r0, Operand('a'));
- __ cmp(r0, Operand('z' - 'a'));
- BranchOrBacktrack(ls, on_no_match);
- __ jmp(&done);
-
- __ bind(&digits);
- __ cmp(current_character(), Operand('0'));
- BranchOrBacktrack(hs, on_no_match);
- __ bind(&done);
-
+ Label done;
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmp(current_character(), Operand('z'));
+ __ b(hi, &done);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ mov(r0, Operand(map));
+ __ ldrb(r0, MemOperand(r0, current_character()));
+ __ tst(r0, Operand(r0));
+ BranchOrBacktrack(ne, on_no_match);
+ if (mode_ != ASCII) {
+ __ bind(&done);
+ }
return true;
}
case '*':
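Two tricks in this hunk are worth unpacking. The '\n' case folds four line terminators into two unsigned comparisons via an XOR, and the rewritten '\w'/'\W' cases replace chained range checks with a single load from the 128-entry table behind ExternalReference::re_word_character_map(). A minimal C++ sketch of both, where kWordCharacterMap is a hypothetical stand-in for that table (nonzero bytes for 0-9, A-Z, a-z and '_'):

    // Worked form of the XOR trick: c ^ 0x01 maps '\n' (0x0a) -> 0x0b and
    // '\r' (0x0d) -> 0x0c, so subtracting 0x0b leaves 0 or 1 and a single
    // unsigned compare catches both characters.
    static bool IsLineTerminator(unsigned c, bool ascii_mode) {
      unsigned r = (c ^ 0x01) - 0x0b;
      if (r <= 0x0c - 0x0b) return true;        // '\n' or '\r'
      if (ascii_mode) return false;
      // 0x2028 ^ 0x01 == 0x2029 and vice versa; re-center and compare again.
      return (r - (0x2028 - 0x0b)) <= 1;        // U+2028 or U+2029
    }

    // Hypothetical stand-in for the word-character table: one byte per
    // ASCII code point, nonzero exactly for [0-9A-Za-z_].
    static unsigned char kWordCharacterMap[128];

    static bool IsWordCharacter(unsigned c) {
      if (c > 'z') return false;                // covers the UC16 pre-check
      return kWordCharacterMap[c] != 0;         // one load replaces the ranges
    }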
@@ -620,6 +610,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Set frame pointer just above the arguments.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
+ __ push(r0); // Make room for "at start" constant (value is irrelevant).
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -663,6 +654,15 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Store this value in a local variable, for use when clearing
// position registers.
__ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+ // Determine whether the start index is zero, that is, whether matching
+ // begins at the start of the string, and store that in a local variable.
+ __ ldr(r1, MemOperand(frame_pointer(), kStartIndex));
+ __ tst(r1, Operand(r1));
+ __ mov(r1, Operand(1), LeaveCC, eq);
+ __ mov(r1, Operand(0), LeaveCC, ne);
+ __ str(r1, MemOperand(frame_pointer(), kAtStart));
+
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
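The new prologue computes the "at start" flag without a branch: tst sets the condition codes from the start index, and the two predicated mov instructions select 1 or 0. A branchless C++ equivalent of the value that lands in the kAtStart slot:

    // Equivalent of the tst / mov(eq) / mov(ne) sequence above.
    static int ComputeAtStartFlag(int start_index) {
      return start_index == 0 ? 1 : 0;   // stored into the kAtStart frame slot
    }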
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index 4459859a3..7de5f93d7 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -123,8 +123,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kReturnAddress + kPointerSize;
- static const int kAtStart = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kAtStart + kPointerSize;
+ static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
// Below the frame pointer.
@@ -136,8 +135,9 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kInputStartMinusOne = kInputString - kPointerSize;
+ static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kAtStart - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
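Because every local below the frame pointer is defined relative to the slot above it, inserting kAtStart automatically pushes kRegisterZero one slot lower. A compile-time sketch of the chaining, using an illustrative base value for kInputString rather than the real constant:

    const int kPointerSize = 4;
    const int kInputString = -4 * kPointerSize;               // assumed base
    const int kInputStartMinusOne = kInputString - kPointerSize;
    const int kAtStart = kInputStartMinusOne - kPointerSize;  // new local
    const int kRegisterZero = kAtStart - kPointerSize;        // one slot lower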
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index f3927720f..f5431512f 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -47,9 +47,9 @@ using ::v8::internal::ReadLine;
using ::v8::internal::DeleteArray;
// This macro provides a platform independent use of sscanf. The reason for
-// SScanF not being implemented in a platform independent was through
-// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
-// Library does not provide vsscanf.
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the
+// Windows C Run-Time Library does not provide vsscanf.
#define SScanF sscanf // NOLINT
// The Debugger class is used by the simulator while debugging simulated ARM
@@ -355,6 +355,10 @@ void Debugger::Debug() {
} else {
PrintF("Not at debugger stop.");
}
+ } else if ((strcmp(cmd, "t") == 0) || (strcmp(cmd, "trace") == 0)) {
+ ::v8::internal::FLAG_trace_sim = !::v8::internal::FLAG_trace_sim;
+ PrintF("Trace of executed instructions is %s\n",
+ ::v8::internal::FLAG_trace_sim ? "on" : "off");
} else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
PrintF("cont\n");
PrintF(" continue execution (alias 'c')\n");
@@ -378,7 +382,9 @@ void Debugger::Debug() {
PrintF(" delete the breakpoint\n");
PrintF("unstop\n");
PrintF(" ignore the stop instruction at the current location");
- PrintF(" from now on\n");
+ PrintF(" from now on\n");
+ PrintF("trace (alias 't')\n");
+ PrintF(" toogle the tracing of all executed statements");
} else {
PrintF("Unknown command: %s\n", cmd);
}
@@ -890,8 +896,13 @@ bool Simulator::OverflowFrom(int32_t alu_out,
// Support for VFP comparisons.
void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
+ if (isnan(val1) || isnan(val2)) {
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = true;
+ v_flag_FPSCR_ = true;
// All non-NaN cases.
- if (val1 == val2) {
+ } else if (val1 == val2) {
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = true;
c_flag_FPSCR_ = true;
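The fix adds the IEEE-754 "unordered" outcome: when either operand is NaN, ARM encodes the FPSCR flags as N=0, Z=0, C=1, V=1, which is what makes NaN compare as neither less, equal, nor greater. The full mapping, as a standalone sketch:

    #include <cmath>

    struct FpscrFlags { bool n, z, c, v; };

    // The four VFP comparison outcomes as the FPSCR encodes them.
    static FpscrFlags ComputeFpscrFlags(double a, double b) {
      FpscrFlags f;
      if (std::isnan(a) || std::isnan(b)) {
        f.n = false; f.z = false; f.c = true;  f.v = true;   // unordered
      } else if (a == b) {
        f.n = false; f.z = true;  f.c = true;  f.v = false;  // equal
      } else if (a < b) {
        f.n = true;  f.z = false; f.c = false; f.v = false;  // less than
      } else {
        f.n = false; f.z = false; f.c = true;  f.v = false;  // greater than
      }
      return f;
    }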
@@ -2022,42 +2033,62 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
// Decode Type 6 coprocessor instructions.
// Dm = vmov(Rt, Rt2)
// <Rt, Rt2> = vmov(Dm)
+// Ddst = MEM(Rbase + 4*offset).
+// MEM(Rbase + 4*offset) = Dsrc.
void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
- int rt = instr->RtField();
- int rn = instr->RnField();
- int vm = instr->VmField();
+ if (instr->CoprocessorField() != 0xB) {
+ UNIMPLEMENTED(); // Not used by V8.
+ } else {
+ switch (instr->OpcodeField()) {
+ case 0x2:
+ // Transfer a double between a VFP register pair and two GP registers.
+ if (instr->Bits(7, 4) != 0x1) {
+ UNIMPLEMENTED(); // Not used by V8.
+ } else {
+ int rt = instr->RtField();
+ int rn = instr->RnField();
+ int vm = instr->VmField();
+ if (instr->HasL()) {
+ int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
+ int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
- if (instr->Bit(23) == 1) {
- UNIMPLEMENTED();
- } else if (instr->Bit(22) == 1) {
- if ((instr->Bits(27, 24) == 0xC) &&
- (instr->Bit(22) == 1) &&
- (instr->Bits(11, 8) == 0xB) &&
- (instr->Bits(7, 6) == 0x0) &&
- (instr->Bit(4) == 1)) {
- if (instr->Bit(20) == 0) {
- int32_t rs_val = get_register(rt);
- int32_t rn_val = get_register(rn);
-
- set_s_register_from_sinteger(2*vm, rs_val);
- set_s_register_from_sinteger((2*vm+1), rn_val);
-
- } else if (instr->Bit(20) == 1) {
- int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
- int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
-
- set_register(rt, rt_int_value);
- set_register(rn, rn_int_value);
+ set_register(rt, rt_int_value);
+ set_register(rn, rn_int_value);
+ } else {
+ int32_t rs_val = get_register(rt);
+ int32_t rn_val = get_register(rn);
+
+ set_s_register_from_sinteger(2*vm, rs_val);
+ set_s_register_from_sinteger((2*vm+1), rn_val);
+ }
+ }
+ break;
+ case 0x8:
+ case 0xC: { // Load and store double to memory.
+ int rn = instr->RnField();
+ int vd = instr->VdField();
+ int offset = instr->Immed8Field();
+ if (!instr->HasU()) {
+ offset = -offset;
+ }
+ int32_t address = get_register(rn) + 4 * offset;
+ if (instr->HasL()) {
+ // Load double from memory: vldr.
+ set_s_register_from_sinteger(2*vd, ReadW(address, instr));
+ set_s_register_from_sinteger(2*vd + 1, ReadW(address + 4, instr));
+ } else {
+ // Store double to memory: vstr.
+ WriteW(address, get_sinteger_from_s_register(2*vd), instr);
+ WriteW(address + 4, get_sinteger_from_s_register(2*vd + 1), instr);
+ }
+ break;
}
- } else {
- UNIMPLEMENTED();
+ default:
+ UNIMPLEMENTED(); // Not used by V8.
+ break;
}
- } else if (instr->Bit(21) == 1) {
- UNIMPLEMENTED();
- } else {
- UNIMPLEMENTED();
}
}
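The new 0x8/0xC opcode arm decodes vldr and vstr: the 8-bit immediate counts words, the U bit selects its sign, and the double is transferred as two consecutive 32-bit words. The address math, extracted into plain C++ (ReadW/WriteW in the hunk above are the simulator's memory accessors):

    #include <stdint.h>

    // Effective address for vldr/vstr as decoded above: the immediate is a
    // word count, negated when the U (add/subtract) bit is clear.
    static int32_t VfpTransferAddress(int32_t base, int imm8, bool u_bit) {
      int offset = u_bit ? imm8 : -imm8;
      return base + 4 * offset;
    }
    // The double then moves as two words: low at address, high at address + 4.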
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 3ce5b7a6b..19737301a 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -63,8 +63,8 @@ class SimulatorStack : public v8::internal::AllStatic {
// Call the generated regexp code directly. The entry function pointer should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- entry(p0, p1, p2, p3, p4, p5, p6, p7)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+ entry(p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
@@ -79,9 +79,9 @@ class SimulatorStack : public v8::internal::AllStatic {
assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
assembler::arm::Simulator::current()->Call( \
- FUNCTION_ADDR(entry), 8, p0, p1, p2, p3, p4, p5, p6, p7)
+ FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
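With at_start gone, the generated regexp code takes seven parameters instead of eight, and both expansions of CALL_GENERATED_REGEXP_CODE must agree with the simulator's argument count or Call() would read stale values off its parameter array. A hedged sketch of the call shape; the parameter names below are placeholders, not the real signature:

    // Placeholder types and names only -- the real parameter meanings live
    // in the regexp macro assembler. The point is the arity: seven, not eight.
    typedef int (*RegExpCodeEntry)(void* p0, void* p1, void* p2, void* p3,
                                   void* p4, void* p5, void* p6);

    static int RunRegExp(RegExpCodeEntry entry, void* a[7]) {
      return entry(a[0], a[1], a[2], a[3], a[4], a[5], a[6]);
    }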
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 958842d2c..d19a683dc 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -362,6 +362,369 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
}
+static void GenerateCallFunction(MacroAssembler* masm,
+ Object* object,
+ const ParameterCount& arguments,
+ Label* miss) {
+ // ----------- S t a t e -------------
+ // -- r0: receiver
+ // -- r1: function to call
+ // -----------------------------------
+
+ // Check that the function really is a function.
+ __ BranchOnSmi(r1, miss);
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+ __ b(ne, miss);
+
+ // Patch the receiver on the stack with the global proxy if
+ // necessary.
+ if (object->IsGlobalObject()) {
+ __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+ __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize));
+ }
+
+ // Invoke the function.
+ __ InvokeFunction(r1, arguments, JUMP_FUNCTION);
+}
+
+
+static void GenerateCallConstFunction(MacroAssembler* masm,
+ JSFunction* function,
+ const ParameterCount& arguments) {
+ ASSERT(function->is_compiled());
+
+ // Get the function and setup the context.
+ __ mov(r1, Operand(Handle<JSFunction>(function)));
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+ // Jump to the cached code (tail call).
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ __ InvokeCode(code, expected, arguments,
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+}
+
+
+template <class Compiler>
+static void CompileLoadInterceptor(Compiler* compiler,
+ StubCompiler* stub_compiler,
+ MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ ASSERT(holder->HasNamedInterceptor());
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ BranchOnSmi(receiver, miss);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ stub_compiler->CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, name, miss);
+
+ if (lookup->IsValid() && lookup->IsCacheable()) {
+ compiler->CompileCacheable(masm,
+ stub_compiler,
+ receiver,
+ reg,
+ scratch1,
+ scratch2,
+ holder,
+ lookup,
+ name,
+ miss);
+ } else {
+ compiler->CompileRegular(masm,
+ receiver,
+ reg,
+ scratch2,
+ holder,
+ miss);
+ }
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ JSObject* holder_obj) {
+ __ push(receiver);
+ __ push(holder);
+ __ push(name);
+ InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+ ASSERT(!Heap::InNewSpace(interceptor));
+
+ Register scratch = receiver;
+ __ mov(scratch, Operand(Handle<Object>(interceptor)));
+ __ push(scratch);
+ __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
+ __ push(scratch);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ JSObject* holder_obj) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
+ __ mov(r0, Operand(5));
+ __ mov(r1, Operand(ref));
+
+ CEntryStub stub(1);
+ __ CallStub(&stub);
+}
+
+
+class LoadInterceptorCompiler BASE_EMBEDDED {
+ public:
+ explicit LoadInterceptorCompiler(Register name) : name_(name) {}
+
+ void CompileCacheable(MacroAssembler* masm,
+ StubCompiler* stub_compiler,
+ Register receiver,
+ Register holder,
+ Register scratch1,
+ Register scratch2,
+ JSObject* holder_obj,
+ LookupResult* lookup,
+ String* name,
+ Label* miss_label) {
+ AccessorInfo* callback = 0;
+ bool optimize = false;
+ // So far the most popular follow-ups for interceptor loads are FIELD
+ // and CALLBACKS, so inline only those; other cases may be added
+ // later.
+ if (lookup->type() == FIELD) {
+ optimize = true;
+ } else if (lookup->type() == CALLBACKS) {
+ Object* callback_object = lookup->GetCallbackObject();
+ if (callback_object->IsAccessorInfo()) {
+ callback = AccessorInfo::cast(callback_object);
+ optimize = callback->getter() != NULL;
+ }
+ }
+
+ if (!optimize) {
+ CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+ return;
+ }
+
+ // Note: starting a frame here makes GC aware of pointers pushed below.
+ __ EnterInternalFrame();
+
+ if (lookup->type() == CALLBACKS) {
+ __ push(receiver);
+ }
+ __ push(holder);
+ __ push(name_);
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ Label interceptor_failed;
+ // Compare with no_interceptor_result_sentinel.
+ __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch1);
+ __ b(eq, &interceptor_failed);
+ __ LeaveInternalFrame();
+ __ Ret();
+
+ __ bind(&interceptor_failed);
+ __ pop(name_);
+ __ pop(holder);
+
+ if (lookup->type() == CALLBACKS) {
+ __ pop(receiver);
+ }
+
+ __ LeaveInternalFrame();
+
+ if (lookup->type() == FIELD) {
+ holder = stub_compiler->CheckPrototypes(holder_obj,
+ holder,
+ lookup->holder(),
+ scratch1,
+ scratch2,
+ name,
+ miss_label);
+ stub_compiler->GenerateFastPropertyLoad(masm,
+ r0,
+ holder,
+ lookup->holder(),
+ lookup->GetFieldIndex());
+ __ Ret();
+ } else {
+ ASSERT(lookup->type() == CALLBACKS);
+ ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+ ASSERT(callback != NULL);
+ ASSERT(callback->getter() != NULL);
+
+ Label cleanup;
+ __ pop(scratch2);
+ __ push(receiver);
+ __ push(scratch2);
+
+ holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ &cleanup);
+
+ __ push(holder);
+ __ Move(holder, Handle<AccessorInfo>(callback));
+ __ push(holder);
+ __ ldr(scratch1, FieldMemOperand(holder, AccessorInfo::kDataOffset));
+ __ push(scratch1);
+ __ push(name_);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ __ TailCallRuntime(ref, 5, 1);
+
+ __ bind(&cleanup);
+ __ pop(scratch1);
+ __ pop(scratch2);
+ __ push(scratch1);
+ }
+ }
+
+
+ void CompileRegular(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register scratch,
+ JSObject* holder_obj,
+ Label* miss_label) {
+ PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ __ TailCallRuntime(ref, 5, 1);
+ }
+
+ private:
+ Register name_;
+};
+
+
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+ CallInterceptorCompiler(const ParameterCount& arguments, Register name)
+ : arguments_(arguments), argc_(arguments.immediate()), name_(name) {}
+
+ void CompileCacheable(MacroAssembler* masm,
+ StubCompiler* stub_compiler,
+ Register receiver,
+ Register holder,
+ Register scratch1,
+ Register scratch2,
+ JSObject* holder_obj,
+ LookupResult* lookup,
+ String* name,
+ Label* miss_label) {
+ JSFunction* function = 0;
+ bool optimize = false;
+ // So far the most popular case for a failed interceptor is a
+ // CONSTANT_FUNCTION sitting below it.
+ if (lookup->type() == CONSTANT_FUNCTION) {
+ function = lookup->GetConstantFunction();
+ // JSArray holder is a special case for call constant function
+ // (see the corresponding code).
+ if (function->is_compiled() && !holder_obj->IsJSArray()) {
+ optimize = true;
+ }
+ }
+
+ if (!optimize) {
+ CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+ return;
+ }
+
+ // Constant functions cannot sit on the global object.
+ ASSERT(!lookup->holder()->IsGlobalObject());
+
+ __ EnterInternalFrame();
+ __ push(holder); // Save the holder.
+ __ push(name_); // Save the name.
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ ASSERT(!r0.is(name_));
+ ASSERT(!r0.is(scratch1));
+ __ pop(name_); // Restore the name.
+ __ pop(scratch1); // Restore the holder.
+ __ LeaveInternalFrame();
+
+ // Compare with no_interceptor_result_sentinel.
+ __ LoadRoot(scratch2, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch2);
+ Label invoke;
+ __ b(ne, &invoke);
+
+ stub_compiler->CheckPrototypes(holder_obj, scratch1,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ miss_label);
+ GenerateCallConstFunction(masm, function, arguments_);
+
+ __ bind(&invoke);
+ }
+
+ void CompileRegular(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register scratch,
+ JSObject* holder_obj,
+ Label* miss_label) {
+ __ EnterInternalFrame();
+ // Save the name_ register across the call.
+ __ push(name_);
+
+ PushInterceptorArguments(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
+ __ mov(r0, Operand(5));
+ __ mov(r1, Operand(ref));
+
+ CEntryStub stub(1);
+ __ CallStub(&stub);
+
+ // Restore the name_ register.
+ __ pop(name_);
+ __ LeaveInternalFrame();
+ }
+
+ private:
+ const ParameterCount& arguments_;
+ int argc_;
+ Register name_;
+};
+
+
#undef __
#define __ ACCESS_MASM(masm())
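CompileLoadInterceptor is templated over its Compiler so one map-check-and-lookup skeleton serves both property loads and calls; LoadInterceptorCompiler and CallInterceptorCompiler differ only in what CompileCacheable and CompileRegular emit. The shape of that pattern, reduced to a toy example:

    // Toy reduction of the compile-time strategy dispatch used above.
    template <class Compiler>
    static void CompileInterceptor(Compiler* compiler, bool cacheable) {
      // ...shared receiver and prototype checks would be emitted here...
      if (cacheable) {
        compiler->CompileCacheable();  // inline the post-interceptor lookup
      } else {
        compiler->CompileRegular();    // fall back to a runtime call
      }
    }

    struct LoadCompiler {
      void CompileCacheable() { /* emit fast field/callback load */ }
      void CompileRegular()   { /* emit tail call to the load runtime */ }
    };

    struct CallCompiler {
      void CompileCacheable() { /* emit direct constant-function invoke */ }
      void CompileRegular()   { /* emit runtime call, then invoke result */ }
    };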
@@ -491,30 +854,18 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
Register scratch2,
String* name,
Label* miss) {
- // Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
-
- // Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
-
- // Push the arguments on the JS stack of the caller.
- __ push(receiver); // receiver
- __ push(reg); // holder
- __ push(name_reg); // name
-
- InterceptorInfo* interceptor = holder->GetNamedInterceptor();
- ASSERT(!Heap::InNewSpace(interceptor));
- __ mov(scratch1, Operand(Handle<Object>(interceptor)));
- __ push(scratch1);
- __ ldr(scratch2, FieldMemOperand(scratch1, InterceptorInfo::kDataOffset));
- __ push(scratch2);
-
- // Do tail-call to the runtime system.
- ExternalReference load_ic_property =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
- __ TailCallRuntime(load_ic_property, 5, 1);
+ LoadInterceptorCompiler compiler(name_reg);
+ CompileLoadInterceptor(&compiler,
+ this,
+ masm(),
+ object,
+ holder,
+ name,
+ lookup,
+ receiver,
+ scratch1,
+ scratch2,
+ miss);
}
@@ -572,22 +923,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
CheckPrototypes(JSObject::cast(object), r0, holder, r3, r2, name, &miss);
GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
- // Check that the function really is a function.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
- // Get the map.
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
-
- // Invoke the function.
- __ InvokeFunction(r1, arguments(), JUMP_FUNCTION);
+ GenerateCallFunction(masm(), object, arguments(), &miss);
// Handle call cache miss.
__ bind(&miss);
@@ -637,50 +973,65 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
- // Check that the object is a two-byte string or a symbol.
- __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(hs, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- r2);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
- r1, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ // Check that the object is a two-byte string or a symbol.
+ __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(hs, &miss);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ r2);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+ r1, name, &miss);
+ }
break;
case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &fast);
- __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::NUMBER_FUNCTION_INDEX,
- r2);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
- r1, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &fast);
+ __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
+ __ b(ne, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::NUMBER_FUNCTION_INDEX,
+ r2);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+ r1, name, &miss);
+ }
break;
}
case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
- __ b(eq, &fast);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::BOOLEAN_FUNCTION_INDEX,
- r2);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
- r1, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a boolean.
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r1, ip);
+ __ b(eq, &fast);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r1, ip);
+ __ b(ne, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::BOOLEAN_FUNCTION_INDEX,
+ r2);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+ r1, name, &miss);
+ }
break;
}
@@ -700,16 +1051,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
UNREACHABLE();
}
- // Get the function and setup the context.
- __ mov(r1, Operand(Handle<JSFunction>(function)));
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- ASSERT(function->is_compiled());
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- __ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+ GenerateCallConstFunction(masm(), function, arguments());
// Handle call cache miss.
__ bind(&miss);
@@ -733,7 +1075,34 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
// -----------------------------------
Label miss;
- // TODO(1224669): Implement.
+ // Get the number of arguments.
+ const int argc = arguments().immediate();
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+
+ // Get the receiver from the stack into r0.
+ __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+ // Load the name from the stack into r1.
+ __ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
+
+ CallInterceptorCompiler compiler(arguments(), r1);
+ CompileLoadInterceptor(&compiler,
+ this,
+ masm(),
+ JSObject::cast(object),
+ holder,
+ name,
+ &lookup,
+ r0,
+ r2,
+ r3,
+ &miss);
+
+ // Restore receiver.
+ __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+
+ GenerateCallFunction(masm(), object, arguments(), &miss);
// Handle call cache miss.
__ bind(&miss);
@@ -906,7 +1275,6 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Handle store cache miss.
__ bind(&miss);
- __ mov(r2, Operand(Handle<String>(name))); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -958,7 +1326,6 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Handle store cache miss.
__ bind(&miss);
- __ mov(r2, Operand(Handle<String>(name))); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1084,7 +1451,7 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
__ ldr(r0, MemOperand(sp, 0));
LookupResult lookup;
- holder->LocalLookupRealNamedProperty(name, &lookup);
+ LookupPostInterceptor(holder, name, &lookup);
GenerateLoadInterceptor(object,
holder,
&lookup,
@@ -1250,7 +1617,7 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
__ b(ne, &miss);
LookupResult lookup;
- holder->LocalLookupRealNamedProperty(name, &lookup);
+ LookupPostInterceptor(holder, name, &lookup);
GenerateLoadInterceptor(receiver,
holder,
&lookup,
diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc
index a33ebd420..7a8ac7266 100644
--- a/deps/v8/src/arm/virtual-frame-arm.cc
+++ b/deps/v8/src/arm/virtual-frame-arm.cc
@@ -219,36 +219,15 @@ void VirtualFrame::PushTryHandler(HandlerType type) {
}
-void VirtualFrame::RawCallStub(CodeStub* stub) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallStub(stub);
-}
-
-
-void VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
- PrepareForCall(0, 0);
- arg->Unuse();
- RawCallStub(stub);
-}
-
-
-void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
- PrepareForCall(0, 0);
- arg0->Unuse();
- arg1->Unuse();
- RawCallStub(stub);
-}
-
-
void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
- PrepareForCall(arg_count, arg_count);
+ Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
}
void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- PrepareForCall(arg_count, arg_count);
+ Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(id, arg_count);
}
@@ -257,102 +236,34 @@ void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
int arg_count) {
- PrepareForCall(arg_count, arg_count);
+ Forget(arg_count);
__ InvokeBuiltin(id, flags);
}
-void VirtualFrame::RawCallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ Call(code, rmode);
-}
-
-
void VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
int dropped_args) {
- int spilled_args = 0;
switch (code->kind()) {
case Code::CALL_IC:
- spilled_args = dropped_args + 1;
- break;
case Code::FUNCTION:
- spilled_args = dropped_args + 1;
break;
case Code::KEYED_LOAD_IC:
- ASSERT(dropped_args == 0);
- spilled_args = 2;
- break;
- default:
- // The other types of code objects are called with values
- // in specific registers, and are handled in functions with
- // a different signature.
- UNREACHABLE();
- break;
- }
- PrepareForCall(spilled_args, dropped_args);
- RawCallCodeObject(code, rmode);
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args) {
- int spilled_args = 0;
- switch (code->kind()) {
case Code::LOAD_IC:
- ASSERT(arg->reg().is(r2));
- ASSERT(dropped_args == 0);
- spilled_args = 1;
- break;
case Code::KEYED_STORE_IC:
- ASSERT(arg->reg().is(r0));
- ASSERT(dropped_args == 0);
- spilled_args = 2;
- break;
- default:
- // No other types of code objects are called with values
- // in exactly one register.
- UNREACHABLE();
- break;
- }
- PrepareForCall(spilled_args, dropped_args);
- arg->Unuse();
- RawCallCodeObject(code, rmode);
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
- int dropped_args) {
- int spilled_args = 1;
- switch (code->kind()) {
case Code::STORE_IC:
- ASSERT(arg0->reg().is(r0));
- ASSERT(arg1->reg().is(r2));
ASSERT(dropped_args == 0);
- spilled_args = 1;
break;
case Code::BUILTIN:
ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall));
- ASSERT(arg0->reg().is(r0));
- ASSERT(arg1->reg().is(r1));
- spilled_args = dropped_args + 1;
break;
default:
- // No other types of code objects are called with values
- // in exactly two registers.
UNREACHABLE();
break;
}
- PrepareForCall(spilled_args, dropped_args);
- arg0->Unuse();
- arg1->Unuse();
- RawCallCodeObject(code, rmode);
+ Forget(dropped_args);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ Call(code, rmode);
}
diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h
index b2f0eea60..9a2f7d360 100644
--- a/deps/v8/src/arm/virtual-frame-arm.h
+++ b/deps/v8/src/arm/virtual-frame-arm.h
@@ -287,18 +287,11 @@ class VirtualFrame : public ZoneObject {
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
void CallStub(CodeStub* stub, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- RawCallStub(stub);
+ Forget(arg_count);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ masm()->CallStub(stub);
}
- // Call stub that expects its argument in r0. The argument is given
- // as a result which must be the register r0.
- void CallStub(CodeStub* stub, Result* arg);
-
- // Call stub that expects its arguments in r1 and r0. The arguments
- // are given as results which must be the appropriate registers.
- void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
-
// Call runtime given the number of arguments expected on (and
// removed from) the stack.
void CallRuntime(Runtime::Function* f, int arg_count);
@@ -311,19 +304,10 @@ class VirtualFrame : public ZoneObject {
int arg_count);
// Call into an IC stub given the number of arguments it removes
- // from the stack. Register arguments are passed as results and
- // consumed by the call.
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- int dropped_args);
+ // from the stack. Register arguments to the IC stub are implicit,
+ // and depend on the type of IC stub.
void CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args);
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
int dropped_args);
// Drop a number of elements from the top of the expression stack. May
@@ -511,14 +495,6 @@ class VirtualFrame : public ZoneObject {
// Register counts are correctly updated.
int InvalidateFrameSlotAt(int index);
- // Call a code stub that has already been prepared for calling (via
- // PrepareForCall).
- void RawCallStub(CodeStub* stub);
-
- // Calls a code object which has already been prepared for calling
- // (via PrepareForCall).
- void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
bool Equals(VirtualFrame* other);
// Classes that need raw access to the elements_ array.
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index a736fb13f..dbf2742b2 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -44,6 +44,7 @@
#include "regexp-stack.h"
#include "ast.h"
#include "regexp-macro-assembler.h"
+#include "platform.h"
// Include native regexp-macro-assembler.
#ifdef V8_NATIVE_REGEXP
#if V8_TARGET_ARCH_IA32
@@ -563,11 +564,6 @@ ExternalReference ExternalReference::perform_gc_function() {
}
-ExternalReference ExternalReference::builtin_passed_function() {
- return ExternalReference(&Builtins::builtin_passed_function);
-}
-
-
ExternalReference ExternalReference::random_positive_smi_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(V8::RandomPositiveSmi)));
}
@@ -659,7 +655,7 @@ ExternalReference ExternalReference::re_check_stack_guard_state() {
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#else
- UNREACHABLE("Unexpected architecture");
+ UNREACHABLE();
#endif
return ExternalReference(Redirect(function));
}
@@ -674,6 +670,10 @@ ExternalReference ExternalReference::re_case_insensitive_compare_uc16() {
FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
}
+ExternalReference ExternalReference::re_word_character_map() {
+ return ExternalReference(
+ NativeRegExpMacroAssembler::word_character_map_address());
+}
ExternalReference ExternalReference::address_of_static_offsets_vector() {
return ExternalReference(OffsetsVector::static_offsets_vector_address());
@@ -711,13 +711,13 @@ static double div_two_doubles(double x, double y) {
static double mod_two_doubles(double x, double y) {
- return fmod(x, y);
+ return modulo(x, y);
}
-static int native_compare_doubles(double x, double y) {
- if (x == y) return 0;
- return x < y ? 1 : -1;
+static int native_compare_doubles(double y, double x) {
+ if (x == y) return EQUAL;
+ return x < y ? LESS : GREATER;
}
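Two things changed here: the result now uses the named comparison constants instead of raw integers, and the parameters are deliberately declared (y, x), matching the order in which the caller pushes its operands, so x stays the logical left operand. In isolation, assuming the usual values LESS = -1, EQUAL = 0, GREATER = 1:

    enum { LESS = -1, EQUAL = 0, GREATER = 1 };   // assumed values

    // x is the logical left operand even though it arrives second.
    static int CompareDoubles(double y, double x) {
      if (x == y) return EQUAL;
      return x < y ? LESS : GREATER;
    }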
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index e5efe89b6..ec47d5712 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -398,7 +398,6 @@ class ExternalReference BASE_EMBEDDED {
// ExternalReferenceTable in serialize.cc manually.
static ExternalReference perform_gc_function();
- static ExternalReference builtin_passed_function();
static ExternalReference random_positive_smi_function();
// Static data in the keyed lookup cache.
@@ -463,6 +462,10 @@ class ExternalReference BASE_EMBEDDED {
// Function NativeRegExpMacroAssembler::GrowStack()
static ExternalReference re_grow_stack();
+
+ // byte NativeRegExpMacroAssembler::word_character_bitmap
+ static ExternalReference re_word_character_map();
+
#endif
// This lets you register a function that rewrites all external references.
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 4edcf6d84..7cb557831 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -146,27 +146,6 @@ bool ObjectLiteral::Property::IsCompileTimeValue() {
}
-bool ObjectLiteral::IsValidJSON() {
- int length = properties()->length();
- for (int i = 0; i < length; i++) {
- Property* prop = properties()->at(i);
- if (!prop->value()->IsValidJSON())
- return false;
- }
- return true;
-}
-
-
-bool ArrayLiteral::IsValidJSON() {
- int length = values()->length();
- for (int i = 0; i < length; i++) {
- if (!values()->at(i)->IsValidJSON())
- return false;
- }
- return true;
-}
-
-
void TargetCollector::AddTarget(BreakTarget* target) {
// Add the label to the collector, but discard duplicates.
int length = targets_->length();
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 3961cb81d..48d0bfac0 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -180,11 +180,12 @@ class Expression: public AstNode {
kTestValue
};
- Expression() : context_(kUninitialized) {}
+ static const int kNoLabel = -1;
+
+ Expression() : num_(kNoLabel) {}
virtual Expression* AsExpression() { return this; }
- virtual bool IsValidJSON() { return false; }
virtual bool IsValidLeftHandSide() { return false; }
// Symbols that cannot be parsed as array indices are considered property
@@ -200,12 +201,14 @@ class Expression: public AstNode {
// Static type information for this expression.
StaticType* type() { return &type_; }
- Context context() { return context_; }
- void set_context(Context context) { context_ = context; }
+ int num() { return num_; }
+
+ // AST node numbering ordered by evaluation order.
+ void set_num(int n) { num_ = n; }
private:
StaticType type_;
- Context context_;
+ int num_;
};
@@ -709,8 +712,6 @@ class Literal: public Expression {
return handle_.is_identical_to(other->handle_);
}
- virtual bool IsValidJSON() { return true; }
-
virtual bool IsPropertyName() {
if (handle_->IsSymbol()) {
uint32_t ignored;
@@ -747,8 +748,6 @@ class MaterializedLiteral: public Expression {
// constants and simple object and array literals.
bool is_simple() const { return is_simple_; }
- virtual bool IsValidJSON() { return true; }
-
int depth() const { return depth_; }
private:
@@ -802,7 +801,6 @@ class ObjectLiteral: public MaterializedLiteral {
virtual ObjectLiteral* AsObjectLiteral() { return this; }
virtual void Accept(AstVisitor* v);
- virtual bool IsValidJSON();
Handle<FixedArray> constant_properties() const {
return constant_properties_;
@@ -850,7 +848,6 @@ class ArrayLiteral: public MaterializedLiteral {
virtual void Accept(AstVisitor* v);
virtual ArrayLiteral* AsArrayLiteral() { return this; }
- virtual bool IsValidJSON();
Handle<FixedArray> constant_elements() const { return constant_elements_; }
ZoneList<Expression*>* values() const { return values_; }
@@ -1184,6 +1181,9 @@ class CountOperation: public Expression {
bool is_prefix() const { return is_prefix_; }
bool is_postfix() const { return !is_prefix_; }
Token::Value op() const { return op_; }
+ Token::Value binary_op() {
+ return op_ == Token::INC ? Token::ADD : Token::SUB;
+ }
Expression* expression() const { return expression_; }
virtual void MarkAsStatement() { is_prefix_ = true; }
@@ -1324,10 +1324,9 @@ class FunctionLiteral: public Expression {
start_position_(start_position),
end_position_(end_position),
is_expression_(is_expression),
- loop_nesting_(0),
function_token_position_(RelocInfo::kNoPosition),
inferred_name_(Heap::empty_string()),
- try_fast_codegen_(false) {
+ try_full_codegen_(false) {
#ifdef DEBUG
already_compiled_ = false;
#endif
@@ -1359,16 +1358,13 @@ class FunctionLiteral: public Expression {
bool AllowsLazyCompilation();
- bool loop_nesting() const { return loop_nesting_; }
- void set_loop_nesting(int nesting) { loop_nesting_ = nesting; }
-
Handle<String> inferred_name() const { return inferred_name_; }
void set_inferred_name(Handle<String> inferred_name) {
inferred_name_ = inferred_name;
}
- bool try_fast_codegen() { return try_fast_codegen_; }
- void set_try_fast_codegen(bool flag) { try_fast_codegen_ = flag; }
+ bool try_full_codegen() { return try_full_codegen_; }
+ void set_try_full_codegen(bool flag) { try_full_codegen_ = flag; }
#ifdef DEBUG
void mark_as_compiled() {
@@ -1389,10 +1385,9 @@ class FunctionLiteral: public Expression {
int start_position_;
int end_position_;
bool is_expression_;
- int loop_nesting_;
int function_token_position_;
Handle<String> inferred_name_;
- bool try_fast_codegen_;
+ bool try_full_codegen_;
#ifdef DEBUG
bool already_compiled_;
#endif
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 9eacf57a7..78d09952a 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -249,26 +249,24 @@ bool PendingFixups::Process(Handle<JSBuiltinsObject> builtins) {
V8_Fatal(__FILE__, __LINE__, "Cannot resolve call to builtin %s", name);
}
#endif
- Handle<JSFunction> f = Handle<JSFunction>(JSFunction::cast(o));
+ Handle<SharedFunctionInfo> shared(JSFunction::cast(o)->shared());
// Make sure the number of parameters match the formal parameter count.
int argc = Bootstrapper::FixupFlagsArgumentsCount::decode(flags);
USE(argc);
- ASSERT(f->shared()->formal_parameter_count() == argc);
- if (!f->is_compiled()) {
- // Do lazy compilation and check for stack overflows.
- if (!CompileLazy(f, CLEAR_EXCEPTION)) {
- Clear();
- return false;
- }
+ ASSERT(shared->formal_parameter_count() == argc);
+ // Do lazy compilation if necessary and check for stack overflows.
+ if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) {
+ Clear();
+ return false;
}
Code* code = Code::cast(code_[i]);
Address pc = code->instruction_start() + pc_[i];
RelocInfo target(pc, RelocInfo::CODE_TARGET, 0);
bool use_code_object = Bootstrapper::FixupFlagsUseCodeObject::decode(flags);
if (use_code_object) {
- target.set_target_object(f->code());
+ target.set_target_object(shared->code());
} else {
- target.set_target_address(f->code()->instruction_start());
+ target.set_target_address(shared->code()->instruction_start());
}
LOG(StringEvent("resolved", name));
}
@@ -960,7 +958,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
Handle<JSFunction> fun =
Factory::NewFunctionFromBoilerplate(boilerplate, context);
- // Call function using the either the runtime object or the global
+ // Call function using either the runtime object or the global
// object as the receiver. Provide no parameters.
Handle<Object> receiver =
Handle<Object>(use_runtime_context
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index aa680d76b..db0770f3a 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -36,8 +36,78 @@
namespace v8 {
namespace internal {
+namespace {
+
+// Arguments object passed to C++ builtins.
+template <BuiltinExtraArguments extra_args>
+class BuiltinArguments : public Arguments {
+ public:
+ BuiltinArguments(int length, Object** arguments)
+ : Arguments(length, arguments) { }
+
+ Object*& operator[] (int index) {
+ ASSERT(index < length());
+ return Arguments::operator[](index);
+ }
+
+ template <class S> Handle<S> at(int index) {
+ ASSERT(index < length());
+ return Arguments::at<S>(index);
+ }
+
+ Handle<Object> receiver() {
+ return Arguments::at<Object>(0);
+ }
+
+ Handle<JSFunction> called_function() {
+ STATIC_ASSERT(extra_args == NEEDS_CALLED_FUNCTION);
+ return Arguments::at<JSFunction>(Arguments::length() - 1);
+ }
+
+ // Gets the total number of arguments including the receiver (but
+ // excluding extra arguments).
+ int length() const {
+ STATIC_ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ return Arguments::length();
+ }
+
+#ifdef DEBUG
+ void Verify() {
+ // Check we have at least the receiver.
+ ASSERT(Arguments::length() >= 1);
+ }
+#endif
+};
+
+
+// Specialize BuiltinArguments for the called function extra argument.
+
+template <>
+int BuiltinArguments<NEEDS_CALLED_FUNCTION>::length() const {
+ return Arguments::length() - 1;
+}
+
+#ifdef DEBUG
+template <>
+void BuiltinArguments<NEEDS_CALLED_FUNCTION>::Verify() {
+ // Check we have at least the receiver and the called function.
+ ASSERT(Arguments::length() >= 2);
+ // Make sure cast to JSFunction succeeds.
+ called_function();
+}
+#endif
+
+
+#define DEF_ARG_TYPE(name, spec) \
+ typedef BuiltinArguments<spec> name##ArgumentsType;
+BUILTIN_LIST_C(DEF_ARG_TYPE)
+#undef DEF_ARG_TYPE
+
+} // namespace
+
+
// ----------------------------------------------------------------------------
-// Support macros for defining builtins in C.
+// Support macro for defining builtins in C++.
// ----------------------------------------------------------------------------
//
// A builtin function is defined by writing:
@@ -45,30 +115,26 @@ namespace internal {
// BUILTIN(name) {
// ...
// }
-// BUILTIN_END
//
-// In the body of the builtin function, the variable 'receiver' is visible.
-// The arguments can be accessed through the Arguments object args.
-//
-// args[0]: Receiver (also available as 'receiver')
-// args[1]: First argument
-// ...
-// args[n]: Last argument
-// args.length(): Number of arguments including the receiver.
-// ----------------------------------------------------------------------------
+// In the body of the builtin function the arguments can be accessed
+// through the BuiltinArguments object args.
+#ifdef DEBUG
-// TODO(428): We should consider passing whether or not the
-// builtin was invoked as a constructor as part of the
-// arguments. Maybe we also want to pass the called function?
-#define BUILTIN(name) \
- static Object* Builtin_##name(Arguments args) { \
- Handle<Object> receiver = args.at<Object>(0);
+#define BUILTIN(name) \
+ static Object* Builtin_Impl_##name(name##ArgumentsType args); \
+ static Object* Builtin_##name(name##ArgumentsType args) { \
+ args.Verify(); \
+ return Builtin_Impl_##name(args); \
+ } \
+ static Object* Builtin_Impl_##name(name##ArgumentsType args)
+#else // For release mode.
-#define BUILTIN_END \
- return Heap::undefined_value(); \
-}
+#define BUILTIN(name) \
+ static Object* Builtin_##name(name##ArgumentsType args)
+
+#endif
static inline bool CalledAsConstructor() {
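BUILTIN_END disappears because the macro no longer supplies a trailing return; each builtin body returns explicitly, and in debug builds the macro interposes a Verify() wrapper. What `BUILTIN(ArrayPop) { ... }` now roughly expands to in a debug build:

    static Object* Builtin_Impl_ArrayPop(ArrayPopArgumentsType args);
    static Object* Builtin_ArrayPop(ArrayPopArgumentsType args) {
      args.Verify();                       // at least the receiver is present
      return Builtin_Impl_ArrayPop(args);
    }
    static Object* Builtin_Impl_ArrayPop(ArrayPopArgumentsType args) {
      // ...the body written at the BUILTIN(ArrayPop) site, which now
      // returns its own value explicitly...
    }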
@@ -109,12 +175,12 @@ Handle<Code> Builtins::GetCode(JavaScript id, bool* resolved) {
if (Top::context() != NULL) {
Object* object = Top::builtins()->javascript_builtin(id);
if (object->IsJSFunction()) {
- Handle<JSFunction> function(JSFunction::cast(object));
+ Handle<SharedFunctionInfo> shared(JSFunction::cast(object)->shared());
// Make sure the number of parameters match the formal parameter count.
- ASSERT(function->shared()->formal_parameter_count() ==
+ ASSERT(shared->formal_parameter_count() ==
Builtins::GetArgumentsCount(id));
- if (function->is_compiled() || CompileLazy(function, CLEAR_EXCEPTION)) {
- code = function->code();
+ if (EnsureCompiled(shared, CLEAR_EXCEPTION)) {
+ code = shared->code();
*resolved = true;
}
}
@@ -126,13 +192,13 @@ Handle<Code> Builtins::GetCode(JavaScript id, bool* resolved) {
BUILTIN(Illegal) {
UNREACHABLE();
+ return Heap::undefined_value(); // Make compiler happy.
}
-BUILTIN_END
BUILTIN(EmptyFunction) {
+ return Heap::undefined_value();
}
-BUILTIN_END
BUILTIN(ArrayCodeGeneric) {
@@ -140,7 +206,7 @@ BUILTIN(ArrayCodeGeneric) {
JSArray* array;
if (CalledAsConstructor()) {
- array = JSArray::cast(*receiver);
+ array = JSArray::cast(*args.receiver());
} else {
// Allocate the JS Array
JSFunction* constructor =
@@ -181,8 +247,10 @@ BUILTIN(ArrayCodeGeneric) {
Smi* len = Smi::FromInt(number_of_elements);
Object* obj = Heap::AllocateFixedArrayWithHoles(len->value());
if (obj->IsFailure()) return obj;
+
+ AssertNoAllocation no_gc;
FixedArray* elms = FixedArray::cast(obj);
- WriteBarrierMode mode = elms->GetWriteBarrierMode();
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
// Fill in the content
for (int index = 0; index < number_of_elements; index++) {
elms->set(index, args[index+1], mode);
@@ -190,15 +258,14 @@ BUILTIN(ArrayCodeGeneric) {
// Set length and elements on the array.
array->set_elements(FixedArray::cast(obj));
- array->set_length(len, SKIP_WRITE_BARRIER);
+ array->set_length(len);
return array;
}
-BUILTIN_END
BUILTIN(ArrayPush) {
- JSArray* array = JSArray::cast(*receiver);
+ JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
// Make sure we have space for the elements.
@@ -218,8 +285,10 @@ BUILTIN(ArrayPush) {
int capacity = new_length + (new_length >> 1) + 16;
Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
if (obj->IsFailure()) return obj;
+
+ AssertNoAllocation no_gc;
FixedArray* new_elms = FixedArray::cast(obj);
- WriteBarrierMode mode = new_elms->GetWriteBarrierMode();
+ WriteBarrierMode mode = new_elms->GetWriteBarrierMode(no_gc);
// Fill out the new array with old elements.
for (int i = 0; i < len; i++) new_elms->set(i, elms->get(i), mode);
// Add the provided values.
@@ -230,14 +299,13 @@ BUILTIN(ArrayPush) {
array->set_elements(new_elms);
}
// Set the length.
- array->set_length(Smi::FromInt(new_length), SKIP_WRITE_BARRIER);
+ array->set_length(Smi::FromInt(new_length));
return array->length();
}
-BUILTIN_END
BUILTIN(ArrayPop) {
- JSArray* array = JSArray::cast(*receiver);
+ JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
Object* undefined = Heap::undefined_value();
@@ -249,7 +317,7 @@ BUILTIN(ArrayPop) {
Object* top = elms->get(len - 1);
// Set the length.
- array->set_length(Smi::FromInt(len - 1), SKIP_WRITE_BARRIER);
+ array->set_length(Smi::FromInt(len - 1));
if (!top->IsTheHole()) {
// Delete the top element.
@@ -265,7 +333,6 @@ BUILTIN(ArrayPop) {
return top;
}
-BUILTIN_END
// -----------------------------------------------------------------------------
@@ -320,20 +387,20 @@ static inline Object* TypeCheck(int argc,
}
-BUILTIN(HandleApiCall) {
- HandleScope scope;
- bool is_construct = CalledAsConstructor();
+template <bool is_construct>
+static Object* HandleApiCallHelper(
+ BuiltinArguments<NEEDS_CALLED_FUNCTION> args) {
+ ASSERT(is_construct == CalledAsConstructor());
- // TODO(428): Remove use of static variable, handle API callbacks directly.
- Handle<JSFunction> function =
- Handle<JSFunction>(JSFunction::cast(Builtins::builtin_passed_function));
+ HandleScope scope;
+ Handle<JSFunction> function = args.called_function();
if (is_construct) {
Handle<FunctionTemplateInfo> desc =
Handle<FunctionTemplateInfo>(
FunctionTemplateInfo::cast(function->shared()->function_data()));
bool pending_exception = false;
- Factory::ConfigureInstance(desc, Handle<JSObject>::cast(receiver),
+ Factory::ConfigureInstance(desc, Handle<JSObject>::cast(args.receiver()),
&pending_exception);
ASSERT(Top::has_pending_exception() == pending_exception);
if (pending_exception) return Failure::Exception();
@@ -359,15 +426,13 @@ BUILTIN(HandleApiCall) {
Object* data_obj = call_data->data();
Object* result;
- v8::Local<v8::Object> self =
- v8::Utils::ToLocal(Handle<JSObject>::cast(receiver));
Handle<Object> data_handle(data_obj);
v8::Local<v8::Value> data = v8::Utils::ToLocal(data_handle);
ASSERT(raw_holder->IsJSObject());
v8::Local<v8::Function> callee = v8::Utils::ToLocal(function);
Handle<JSObject> holder_handle(JSObject::cast(raw_holder));
v8::Local<v8::Object> holder = v8::Utils::ToLocal(holder_handle);
- LOG(ApiObjectAccess("call", JSObject::cast(*receiver)));
+ LOG(ApiObjectAccess("call", JSObject::cast(*args.receiver())));
v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
data,
holder,
@@ -395,16 +460,26 @@ BUILTIN(HandleApiCall) {
if (!is_construct || result->IsJSObject()) return result;
}
- return *receiver;
+ return *args.receiver();
+}
+
+
+BUILTIN(HandleApiCall) {
+ return HandleApiCallHelper<false>(args);
+}
+
+
+BUILTIN(HandleApiCallConstruct) {
+ return HandleApiCallHelper<true>(args);
}
-BUILTIN_END
// Helper function to handle calls to non-function objects created through the
// API. The object can be called as either a constructor (using new) or just as
// a function (without new).
-static Object* HandleApiCallAsFunctionOrConstructor(bool is_construct_call,
- Arguments args) {
+static Object* HandleApiCallAsFunctionOrConstructor(
+ bool is_construct_call,
+ BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
// Non-functions are never called as constructors. Even if this is an object
// called as a constructor the delegate call is not a construct call.
ASSERT(!CalledAsConstructor());
@@ -412,7 +487,7 @@ static Object* HandleApiCallAsFunctionOrConstructor(bool is_construct_call,
Handle<Object> receiver = args.at<Object>(0);
// Get the object called.
- JSObject* obj = JSObject::cast(*receiver);
+ JSObject* obj = JSObject::cast(*args.receiver());
// Get the invocation callback from the function descriptor that was
// used to create the called object.
@@ -432,12 +507,12 @@ static Object* HandleApiCallAsFunctionOrConstructor(bool is_construct_call,
Object* result;
{ HandleScope scope;
v8::Local<v8::Object> self =
- v8::Utils::ToLocal(Handle<JSObject>::cast(receiver));
+ v8::Utils::ToLocal(Handle<JSObject>::cast(args.receiver()));
Handle<Object> data_handle(data_obj);
v8::Local<v8::Value> data = v8::Utils::ToLocal(data_handle);
Handle<JSFunction> callee_handle(constructor);
v8::Local<v8::Function> callee = v8::Utils::ToLocal(callee_handle);
- LOG(ApiObjectAccess("call non-function", JSObject::cast(*receiver)));
+ LOG(ApiObjectAccess("call non-function", JSObject::cast(*args.receiver())));
v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
data,
self,
@@ -471,7 +546,6 @@ static Object* HandleApiCallAsFunctionOrConstructor(bool is_construct_call,
BUILTIN(HandleApiCallAsFunction) {
return HandleApiCallAsFunctionOrConstructor(false, args);
}
-BUILTIN_END
// Handle calls to non-function objects created through the API. This delegate
@@ -479,14 +553,6 @@ BUILTIN_END
BUILTIN(HandleApiCallAsConstructor) {
return HandleApiCallAsFunctionOrConstructor(true, args);
}
-BUILTIN_END
-
-
-// TODO(1238487): This is a nasty hack. We need to improve the way we
-// call builtins considerable to get rid of this and the hairy macros
-// in builtins.cc.
-Object* Builtins::builtin_passed_function;
-
static void Generate_LoadIC_ArrayLength(MacroAssembler* masm) {
@@ -708,7 +774,7 @@ static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) {
Object* Builtins::builtins_[builtin_count] = { NULL, };
const char* Builtins::names_[builtin_count] = { NULL, };
-#define DEF_ENUM_C(name) FUNCTION_ADDR(Builtin_##name),
+#define DEF_ENUM_C(name, ignore) FUNCTION_ADDR(Builtin_##name),
Address Builtins::c_functions_[cfunction_count] = {
BUILTIN_LIST_C(DEF_ENUM_C)
};
@@ -739,14 +805,16 @@ void Builtins::Setup(bool create_heap_objects) {
const char* s_name; // name is only used for generating log information.
int name;
Code::Flags flags;
+ BuiltinExtraArguments extra_args;
};
-#define DEF_FUNCTION_PTR_C(name) \
- { FUNCTION_ADDR(Generate_Adaptor), \
- FUNCTION_ADDR(Builtin_##name), \
- #name, \
- c_##name, \
- Code::ComputeFlags(Code::BUILTIN) \
+#define DEF_FUNCTION_PTR_C(name, extra_args) \
+ { FUNCTION_ADDR(Generate_Adaptor), \
+ FUNCTION_ADDR(Builtin_##name), \
+ #name, \
+ c_##name, \
+ Code::ComputeFlags(Code::BUILTIN), \
+ extra_args \
},
#define DEF_FUNCTION_PTR_A(name, kind, state) \
@@ -754,7 +822,8 @@ void Builtins::Setup(bool create_heap_objects) {
NULL, \
#name, \
name, \
- Code::ComputeFlags(Code::kind, NOT_IN_LOOP, state) \
+ Code::ComputeFlags(Code::kind, NOT_IN_LOOP, state), \
+ NO_EXTRA_ARGUMENTS \
},
// Define array of pointers to generators and C builtin functions.
@@ -763,7 +832,8 @@ void Builtins::Setup(bool create_heap_objects) {
BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
// Terminator:
- { NULL, NULL, NULL, builtin_count, static_cast<Code::Flags>(0) }
+ { NULL, NULL, NULL, builtin_count, static_cast<Code::Flags>(0),
+ NO_EXTRA_ARGUMENTS }
};
#undef DEF_FUNCTION_PTR_C
@@ -779,12 +849,12 @@ void Builtins::Setup(bool create_heap_objects) {
if (create_heap_objects) {
MacroAssembler masm(buffer, sizeof buffer);
// Generate the code/adaptor.
- typedef void (*Generator)(MacroAssembler*, int);
+ typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
// We pass all arguments to the generator, but it may not use all of
// them. This works because the first arguments are on top of the
// stack.
- g(&masm, functions[i].name);
+ g(&masm, functions[i].name, functions[i].extra_args);
// Move the code into the object heap.
CodeDesc desc;
masm.GetCode(&desc);
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index f0ceab667..418948f75 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -31,20 +31,28 @@
namespace v8 {
namespace internal {
-// Define list of builtins implemented in C.
-#define BUILTIN_LIST_C(V) \
- V(Illegal) \
- \
- V(EmptyFunction) \
- \
- V(ArrayCodeGeneric) \
- \
- V(ArrayPush) \
- V(ArrayPop) \
- \
- V(HandleApiCall) \
- V(HandleApiCallAsFunction) \
- V(HandleApiCallAsConstructor)
+// Specifies extra arguments required by a C++ builtin.
+enum BuiltinExtraArguments {
+ NO_EXTRA_ARGUMENTS = 0,
+ NEEDS_CALLED_FUNCTION = 1
+};
+
+
+// Define list of builtins implemented in C++.
+#define BUILTIN_LIST_C(V) \
+ V(Illegal, NO_EXTRA_ARGUMENTS) \
+ \
+ V(EmptyFunction, NO_EXTRA_ARGUMENTS) \
+ \
+ V(ArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
+ \
+ V(ArrayPush, NO_EXTRA_ARGUMENTS) \
+ V(ArrayPop, NO_EXTRA_ARGUMENTS) \
+ \
+ V(HandleApiCall, NEEDS_CALLED_FUNCTION) \
+ V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION) \
+ V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS) \
+ V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS)
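
Each V(name, extra_args) entry above is expanded by whatever macro is later
substituted for V (DEF_ENUM_C, DEF_FUNCTION_PTR_C, and so on), so the enum of
builtin ids, the C-function address table, and the setup descriptors are all
generated from this one list and cannot drift apart. A self-contained sketch
of the pattern; MY_BUILTIN_LIST and both expansion macros are illustrative
names, not V8's:

    #include <cstdio>

    enum ExtraArgs { NO_EXTRA_ARGUMENTS, NEEDS_CALLED_FUNCTION };

    // One list; each expansion substitutes a different macro for V.
    #define MY_BUILTIN_LIST(V)                  \
      V(Illegal, NO_EXTRA_ARGUMENTS)            \
      V(HandleApiCall, NEEDS_CALLED_FUNCTION)

    // Expansion 1: an enum of builtin ids.
    #define DEF_ENUM(name, ignore) k##name,
    enum BuiltinId { MY_BUILTIN_LIST(DEF_ENUM) kBuiltinCount };
    #undef DEF_ENUM

    // Expansion 2: a parallel table of per-builtin extra-argument modes.
    #define DEF_EXTRA(name, extra) extra,
    static const ExtraArgs kExtraArgs[] = { MY_BUILTIN_LIST(DEF_EXTRA) };
    #undef DEF_EXTRA

    int main() {
      // Both expansions came from the same list, so the index always lines up.
      std::printf("HandleApiCall extra-args mode: %d\n",
                  static_cast<int>(kExtraArgs[kHandleApiCall]));
      return 0;
    }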
// Define list of builtins implemented in assembly.
@@ -52,6 +60,7 @@ namespace internal {
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructCall, BUILTIN, UNINITIALIZED) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED) \
+ V(JSConstructStubApi, BUILTIN, UNINITIALIZED) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED) \
\
@@ -169,7 +178,7 @@ class Builtins : public AllStatic {
static const char* Lookup(byte* pc);
enum Name {
-#define DEF_ENUM_C(name) name,
+#define DEF_ENUM_C(name, ignore) name,
#define DEF_ENUM_A(name, kind, state) name,
BUILTIN_LIST_C(DEF_ENUM_C)
BUILTIN_LIST_A(DEF_ENUM_A)
@@ -180,7 +189,7 @@ class Builtins : public AllStatic {
};
enum CFunctionId {
-#define DEF_ENUM_C(name) c_##name,
+#define DEF_ENUM_C(name, ignore) c_##name,
BUILTIN_LIST_C(DEF_ENUM_C)
#undef DEF_ENUM_C
cfunction_count
@@ -212,8 +221,6 @@ class Builtins : public AllStatic {
static Handle<Code> GetCode(JavaScript id, bool* resolved);
static int NumberOfJavaScriptBuiltins() { return id_count; }
- static Object* builtin_passed_function;
-
private:
// The external C++ functions called from the code.
static Address c_functions_[cfunction_count];
@@ -226,9 +233,12 @@ class Builtins : public AllStatic {
static const char* javascript_names_[id_count];
static int javascript_argc_[id_count];
- static void Generate_Adaptor(MacroAssembler* masm, CFunctionId id);
+ static void Generate_Adaptor(MacroAssembler* masm,
+ CFunctionId id,
+ BuiltinExtraArguments extra_args);
static void Generate_JSConstructCall(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
+ static void Generate_JSConstructStubApi(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 052c1cafa..16267f64e 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -55,9 +55,9 @@ namespace internal {
V(CounterOp) \
V(ArgumentsAccess) \
V(RegExpExec) \
- V(Runtime) \
V(CEntry) \
- V(JSEntry)
+ V(JSEntry) \
+ V(DebuggerStatement)
// List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index fd7e0e80b..cb6089b8b 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -216,13 +216,19 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(FunctionLiteral* fun,
// the compiler.cc code.
Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
Handle<Script> script,
- bool is_eval) {
+ bool is_eval,
+ CompilationInfo* info) {
+ if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ int len = String::cast(script->source())->length();
+ Counters::total_old_codegen_source_size.Increment(len);
+ }
MakeCodePrologue(fun);
// Generate code.
const int kInitialBufferSize = 4 * KB;
- CodeGenerator cgen(kInitialBufferSize, script, is_eval);
+ MacroAssembler masm(NULL, kInitialBufferSize);
+ CodeGenerator cgen(&masm, script, is_eval);
CodeGeneratorScope scope(&cgen);
- cgen.GenCode(fun);
+ cgen.Generate(fun, PRIMARY, info);
if (cgen.HasStackOverflow()) {
ASSERT(!Top::has_pending_exception());
return Handle<Code>::null();
@@ -344,6 +350,7 @@ CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
{&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
{&CodeGenerator::GenerateIsObject, "_IsObject"},
{&CodeGenerator::GenerateIsFunction, "_IsFunction"},
+ {&CodeGenerator::GenerateIsUndetectableObject, "_IsUndetectableObject"},
{&CodeGenerator::GenerateStringAdd, "_StringAdd"},
{&CodeGenerator::GenerateSubString, "_SubString"},
{&CodeGenerator::GenerateStringCompare, "_StringCompare"},
@@ -446,11 +453,6 @@ void CodeGenerator::CodeForSourcePosition(int pos) {
}
-const char* RuntimeStub::GetName() {
- return Runtime::FunctionForId(id_)->stub_name;
-}
-
-
const char* GenericUnaryOpStub::GetName() {
switch (op_) {
case Token::SUB:
@@ -468,14 +470,6 @@ const char* GenericUnaryOpStub::GetName() {
}
-void RuntimeStub::Generate(MacroAssembler* masm) {
- Runtime::Function* f = Runtime::FunctionForId(id_);
- masm->TailCallRuntime(ExternalReference(f),
- num_arguments_,
- f->result_size);
-}
-
-
void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
switch (type_) {
case READ_LENGTH: GenerateReadLength(masm); break;
@@ -485,6 +479,17 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
}
+int CEntryStub::MinorKey() {
+ ASSERT(result_size_ <= 2);
+#ifdef _WIN64
+ return ExitFrameModeBits::encode(mode_)
+ | IndirectResultBits::encode(result_size_ > 1);
+#else
+ return ExitFrameModeBits::encode(mode_);
+#endif
+}
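
The key packs the exit-frame mode into bit 0 and, under _WIN64 only, records
in bit 1 whether the result comes back indirectly: a two-word result does not
fit in a return register on that ABI, so stubs that differ in result_size_
must get distinct keys there. A hedged sketch of the same layout (the Mode
enum and helper function are illustrative):

    #include <cassert>
    #include <cstdio>

    enum Mode { MODE_NORMAL = 0, MODE_DEBUG = 1 };

    // Bit 0: exit-frame mode.  Bit 1 (Win64 only): indirect-result flag.
    int MinorKey(Mode mode, int result_size, bool win64) {
      assert(result_size <= 2);
      int key = static_cast<int>(mode);             // ExitFrameModeBits
      if (win64 && result_size > 1) key |= 1 << 1;  // IndirectResultBits
      return key;
    }

    int main() {
      std::printf("%d %d\n",
                  MinorKey(MODE_DEBUG, 1, false),   // 1: debug mode only
                  MinorKey(MODE_NORMAL, 2, true));  // 2: indirect-result bit
      return 0;
    }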
+
+
bool ApiGetterEntryStub::GetCustomCache(Code** code_out) {
Object* cache = info()->load_stub_cache();
if (cache->IsUndefined()) {
@@ -501,4 +506,10 @@ void ApiGetterEntryStub::SetCustomCache(Code* value) {
}
+void DebuggerStatementStub::Generate(MacroAssembler* masm) {
+ Runtime::Function* f = Runtime::FunctionForId(Runtime::kDebugBreak);
+ masm->TailCallRuntime(ExternalReference(f), 0, f->result_size);
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 2247c5c80..d0be5f1b1 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -55,7 +55,7 @@
// CodeGenerator
// ~CodeGenerator
// ProcessDeferred
-// GenCode
+// Generate
// ComputeLazyCompile
// BuildBoilerplate
// ComputeCallInitialize
@@ -181,43 +181,6 @@ class DeferredCode: public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(DeferredCode);
};
-
-// RuntimeStub models code stubs calling entry points in the Runtime class.
-class RuntimeStub : public CodeStub {
- public:
- explicit RuntimeStub(Runtime::FunctionId id, int num_arguments)
- : id_(id), num_arguments_(num_arguments) { }
-
- void Generate(MacroAssembler* masm);
-
- // Disassembler support. It is useful to be able to print the name
- // of the runtime function called through this stub.
- static const char* GetNameFromMinorKey(int minor_key) {
- return Runtime::FunctionForId(IdField::decode(minor_key))->stub_name;
- }
-
- private:
- Runtime::FunctionId id_;
- int num_arguments_;
-
- class ArgumentField: public BitField<int, 0, 16> {};
- class IdField: public BitField<Runtime::FunctionId, 16, kMinorBits - 16> {};
-
- Major MajorKey() { return Runtime; }
- int MinorKey() {
- return IdField::encode(id_) | ArgumentField::encode(num_arguments_);
- }
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("RuntimeStub (id %s)\n", Runtime::FunctionForId(id_)->name);
- }
-#endif
-};
-
-
class StackCheckStub : public CodeStub {
public:
StackCheckStub() { }
@@ -367,25 +330,30 @@ class CompareStub: public CodeStub {
class CEntryStub : public CodeStub {
public:
- explicit CEntryStub(int result_size) : result_size_(result_size) { }
+ explicit CEntryStub(int result_size,
+ ExitFrame::Mode mode = ExitFrame::MODE_NORMAL)
+ : result_size_(result_size), mode_(mode) { }
- void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
+ void Generate(MacroAssembler* masm);
- protected:
- void GenerateBody(MacroAssembler* masm, bool is_debug_break);
+ private:
void GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- ExitFrame::Mode mode,
bool do_gc,
bool always_allocate_scope);
void GenerateThrowTOS(MacroAssembler* masm);
void GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type);
- private:
+
// Number of pointers/values returned.
- int result_size_;
+ const int result_size_;
+ const ExitFrame::Mode mode_;
+
+ // Minor key encoding
+ class ExitFrameModeBits: public BitField<ExitFrame::Mode, 0, 1> {};
+ class IndirectResultBits: public BitField<bool, 1, 1> {};
Major MajorKey() { return CEntry; }
// Minor key must differ if different result_size_ values mean different
@@ -422,16 +390,18 @@ class ApiGetterEntryStub : public CodeStub {
};
-class CEntryDebugBreakStub : public CEntryStub {
+// Mark the debugger statement so it can be recognized by the debugger (by its MajorKey)
+class DebuggerStatementStub : public CodeStub {
public:
- CEntryDebugBreakStub() : CEntryStub(1) { }
+ DebuggerStatementStub() { }
- void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
+ void Generate(MacroAssembler* masm);
private:
- int MinorKey() { return 1; }
+ Major MajorKey() { return DebuggerStatement; }
+ int MinorKey() { return 0; }
- const char* GetName() { return "CEntryDebugBreakStub"; }
+ const char* GetName() { return "DebuggerStatementStub"; }
};
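
Giving the debugger statement its own major key lets the debugger identify it
from the code object alone; the matching change to
BreakLocationIterator::IsDebuggerStatement later in this diff drops the cached
debug_break_stub_ and just compares major keys. A minimal sketch of that
recognition scheme (the Code struct and key values are illustrative):

    #include <cstdio>

    enum MajorKey { CEntry, CallFunction, DebuggerStatement };

    struct Code { MajorKey major_key; };

    // No cached stub instance is needed; the major key recorded in the code
    // object is enough to recognize a debugger statement.
    bool IsDebuggerStatement(const Code& code) {
      return code.major_key == DebuggerStatement;
    }

    int main() {
      Code stub = { DebuggerStatement };
      std::printf("%s\n", IsDebuggerStatement(stub) ? "debugger stub" : "other");
      return 0;
    }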
@@ -516,6 +486,64 @@ class RegExpExecStub: public CodeStub {
};
+class CallFunctionStub: public CodeStub {
+ public:
+ CallFunctionStub(int argc, InLoopFlag in_loop, CallFunctionFlags flags)
+ : argc_(argc), in_loop_(in_loop), flags_(flags) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int argc_;
+ InLoopFlag in_loop_;
+ CallFunctionFlags flags_;
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("CallFunctionStub (args %d, in_loop %d, flags %d)\n",
+ argc_,
+ static_cast<int>(in_loop_),
+ static_cast<int>(flags_));
+ }
+#endif
+
+  // Minor key encoding in 31 bits: AAAAAAAAAAAAAAAAAAAAAFI (A = argc, F = flag, I = in-loop).
+ class InLoopBits: public BitField<InLoopFlag, 0, 1> {};
+ class FlagBits: public BitField<CallFunctionFlags, 1, 1> {};
+ class ArgcBits: public BitField<int, 2, 29> {};
+
+ Major MajorKey() { return CallFunction; }
+ int MinorKey() {
+ // Encode the parameters in a unique 31 bit value.
+ return InLoopBits::encode(in_loop_)
+ | FlagBits::encode(flags_)
+ | ArgcBits::encode(argc_);
+ }
+
+ InLoopFlag InLoop() { return in_loop_; }
+ bool ReceiverMightBeValue() {
+ return (flags_ & RECEIVER_MIGHT_BE_VALUE) != 0;
+ }
+
+ public:
+ static int ExtractArgcFromMinorKey(int minor_key) {
+ return ArgcBits::decode(minor_key);
+ }
+};
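
The three BitField helpers carve the 31-bit minor key into bit 0 (in-loop
flag), bit 1 (receiver-might-be-value flag), and bits 2-30 (argument count),
which is what lets ExtractArgcFromMinorKey recover argc from a bare key; the
debugger does exactly that later in this diff. A self-contained sketch with a
simplified BitField template standing in for V8's:

    #include <cstdio>

    // Simplified stand-in for V8's BitField<T, shift, size> helper.
    template <typename T, int shift, int size>
    struct BitField {
      static const int kMask = ((1 << size) - 1) << shift;
      static int encode(T value) { return static_cast<int>(value) << shift; }
      static T decode(int key) { return static_cast<T>((key & kMask) >> shift); }
    };

    typedef BitField<int, 0, 1>  InLoopBits;  // bit 0
    typedef BitField<int, 1, 1>  FlagBits;    // bit 1
    typedef BitField<int, 2, 29> ArgcBits;    // bits 2-30

    int main() {
      int key = InLoopBits::encode(1) | FlagBits::encode(0) | ArgcBits::encode(7);
      std::printf("argc = %d\n", ArgcBits::decode(key));  // prints "argc = 7"
      return 0;
    }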
+
+
+class ToBooleanStub: public CodeStub {
+ public:
+ ToBooleanStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return 0; }
+};
+
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 420b809e7..a5e1e5c88 100644..100755
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -33,6 +33,7 @@
#include "compiler.h"
#include "debug.h"
#include "fast-codegen.h"
+#include "full-codegen.h"
#include "oprofile-agent.h"
#include "rewriter.h"
#include "scopes.h"
@@ -42,51 +43,11 @@ namespace v8 {
namespace internal {
-class CodeGenSelector: public AstVisitor {
- public:
- enum CodeGenTag { NORMAL, FAST };
-
- CodeGenSelector()
- : has_supported_syntax_(true),
- context_(Expression::kUninitialized) {
- }
-
- CodeGenTag Select(FunctionLiteral* fun);
-
- private:
- // Visit an expression in a given expression context.
- void ProcessExpression(Expression* expr, Expression::Context context) {
- ASSERT(expr->context() == Expression::kUninitialized ||
- expr->context() == context);
- Expression::Context saved = context_;
- context_ = context;
- Visit(expr);
- expr->set_context(context);
- context_ = saved;
- }
-
- void VisitDeclarations(ZoneList<Declaration*>* decls);
- void VisitStatements(ZoneList<Statement*>* stmts);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- bool has_supported_syntax_;
-
- // The desired expression context of the currently visited expression.
- Expression::Context context_;
-
- DISALLOW_COPY_AND_ASSIGN(CodeGenSelector);
-};
-
-
static Handle<Code> MakeCode(FunctionLiteral* literal,
Handle<Script> script,
Handle<Context> context,
bool is_eval,
- Handle<SharedFunctionInfo> shared) {
+ CompilationInfo* info) {
ASSERT(literal != NULL);
// Rewrite the AST by introducing .result assignments where needed.
@@ -121,39 +82,41 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
return Handle<Code>::null();
}
- // Generate code and return it.
- if (FLAG_fast_compiler) {
- // If there is no shared function info, try the fast code
- // generator for code in the global scope. Otherwise obey the
- // explicit hint in the shared function info.
- // If always_fast_compiler is true, always try the fast compiler.
- if (shared.is_null() && !literal->scope()->is_global_scope() &&
- !FLAG_always_fast_compiler) {
- if (FLAG_trace_bailout) PrintF("Non-global scope\n");
- } else if (!shared.is_null() && !shared->try_fast_codegen() &&
- !FLAG_always_fast_compiler) {
- if (FLAG_trace_bailout) PrintF("No hint to try fast\n");
- } else {
- CodeGenSelector selector;
- CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
- if (code_gen == CodeGenSelector::FAST) {
- return FastCodeGenerator::MakeCode(literal, script, is_eval);
- }
- ASSERT(code_gen == CodeGenSelector::NORMAL);
+ // Generate code and return it. Code generator selection is governed by
+ // which backends are enabled and whether the function is considered
+ // run-once code or not:
+ //
+ // --full-compiler enables the dedicated backend for code we expect to be
+ // run once
+ // --fast-compiler enables a speculative optimizing backend (for
+ // non-run-once code)
+ //
+ // The normal choice of backend can be overridden with the flags
+ // --always-full-compiler and --always-fast-compiler, which are mutually
+ // incompatible.
+ CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
+
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ bool is_run_once = (shared.is_null())
+ ? literal->scope()->is_global_scope()
+ : (shared->is_toplevel() || shared->try_full_codegen());
+
+ if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
+ FullCodeGenSyntaxChecker checker;
+ checker.Check(literal);
+ if (checker.has_supported_syntax()) {
+ return FullCodeGenerator::MakeCode(literal, script, is_eval);
+ }
+ } else if (FLAG_always_fast_compiler ||
+ (FLAG_fast_compiler && !is_run_once)) {
+ FastCodeGenSyntaxChecker checker;
+ checker.Check(literal, info);
+ if (checker.has_supported_syntax()) {
+ return FastCodeGenerator::MakeCode(literal, script, is_eval, info);
}
}
- return CodeGenerator::MakeCode(literal, script, is_eval);
-}
-
-static bool IsValidJSON(FunctionLiteral* lit) {
- if (lit->body()->length() != 1)
- return false;
- Statement* stmt = lit->body()->at(0);
- if (stmt->AsExpressionStatement() == NULL)
- return false;
- Expression* expr = stmt->AsExpressionStatement()->expression();
- return expr->IsValidJSON();
+ return CodeGenerator::MakeCode(literal, script, is_eval, info);
}
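
Condensed, the policy above reads: run-once code prefers the full compiler,
other code prefers the fast compiler, and unsupported syntax (or neither flag
enabled) falls back to the classic backend. A compact sketch of that decision
with the syntax checkers reduced to a boolean (all names here are
illustrative):

    #include <cstdio>

    enum Backend { FULL_CODEGEN, FAST_CODEGEN, CLASSIC_CODEGEN };

    Backend ChooseBackend(bool always_full, bool full_enabled,
                          bool always_fast, bool fast_enabled,
                          bool is_run_once, bool syntax_supported) {
      if (always_full || (full_enabled && is_run_once)) {
        if (syntax_supported) return FULL_CODEGEN;   // else fall through
      } else if (always_fast || (fast_enabled && !is_run_once)) {
        if (syntax_supported) return FAST_CODEGEN;   // else fall through
      }
      return CLASSIC_CODEGEN;  // the classic backend handles everything
    }

    int main() {
      // Run-once top-level code with supported syntax: full compiler (0).
      std::printf("%d\n", ChooseBackend(false, true, false, true, true, true));
      return 0;
    }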
@@ -171,8 +134,8 @@ static Handle<JSFunction> MakeFunction(bool is_global,
ASSERT(!i::Top::global_context().is_null());
script->set_context_data((*i::Top::global_context())->data());
-#ifdef ENABLE_DEBUGGER_SUPPORT
bool is_json = (validate == Compiler::VALIDATE_JSON);
+#ifdef ENABLE_DEBUGGER_SUPPORT
if (is_eval || is_json) {
script->set_compilation_type(
is_json ? Smi::FromInt(Script::COMPILATION_TYPE_JSON) :
@@ -180,12 +143,14 @@ static Handle<JSFunction> MakeFunction(bool is_global,
// For eval scripts add information on the function from which eval was
// called.
if (is_eval) {
- JavaScriptFrameIterator it;
- script->set_eval_from_shared(
- JSFunction::cast(it.frame()->function())->shared());
- int offset = static_cast<int>(
- it.frame()->pc() - it.frame()->code()->instruction_start());
- script->set_eval_from_instructions_offset(Smi::FromInt(offset));
+ StackTraceFrameIterator it;
+ if (!it.done()) {
+ script->set_eval_from_shared(
+ JSFunction::cast(it.frame()->function())->shared());
+ int offset = static_cast<int>(
+ it.frame()->pc() - it.frame()->code()->instruction_start());
+ script->set_eval_from_instructions_offset(Smi::FromInt(offset));
+ }
}
}
@@ -197,7 +162,8 @@ static Handle<JSFunction> MakeFunction(bool is_global,
ASSERT(is_eval || is_global);
// Build AST.
- FunctionLiteral* lit = MakeAST(is_global, script, extension, pre_data);
+ FunctionLiteral* lit =
+ MakeAST(is_global, script, extension, pre_data, is_json);
// Check for parse errors.
if (lit == NULL) {
@@ -205,19 +171,6 @@ static Handle<JSFunction> MakeFunction(bool is_global,
return Handle<JSFunction>::null();
}
- // When parsing JSON we do an ordinary parse and then afterwards
- // check the AST to ensure it was well-formed. If not we give a
- // syntax error.
- if (validate == Compiler::VALIDATE_JSON && !IsValidJSON(lit)) {
- HandleScope scope;
- Handle<JSArray> args = Factory::NewJSArray(1);
- Handle<Object> source(script->source());
- SetElement(args, 0, source);
- Handle<Object> result = Factory::NewSyntaxError("invalid_json", args);
- Top::Throw(*result, NULL);
- return Handle<JSFunction>::null();
- }
-
// Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the
// parsing statistics.
@@ -227,8 +180,10 @@ static Handle<JSFunction> MakeFunction(bool is_global,
HistogramTimerScope timer(rate);
// Compile the code.
- Handle<Code> code = MakeCode(lit, script, context, is_eval,
- Handle<SharedFunctionInfo>::null());
+ CompilationInfo info(Handle<SharedFunctionInfo>::null(),
+ Handle<Object>::null(), // No receiver.
+ 0); // Not nested in a loop.
+ Handle<Code> code = MakeCode(lit, script, context, is_eval, &info);
// Check for stack-overflow exceptions.
if (code.is_null()) {
@@ -389,8 +344,7 @@ Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
}
-bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
- int loop_nesting) {
+bool Compiler::CompileLazy(CompilationInfo* info) {
CompilationZoneScope zone_scope(DELETE_ON_EXIT);
// The VM is in the COMPILER state until exiting this function.
@@ -399,6 +353,7 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
PostponeInterruptsScope postpone;
// Compute name, source code and script data.
+ Handle<SharedFunctionInfo> shared = info->shared_info();
Handle<String> name(String::cast(shared->name()));
Handle<Script> script(Script::cast(shared->script()));
@@ -420,17 +375,17 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
return false;
}
- // Update the loop nesting in the function literal.
- lit->set_loop_nesting(loop_nesting);
-
// Measure how long it takes to do the lazy compilation; only take
// the rest of the function into account to avoid overlap with the
// lazy parsing statistics.
HistogramTimerScope timer(&Counters::compile_lazy);
// Compile the code.
- Handle<Code> code = MakeCode(lit, script, Handle<Context>::null(), false,
- shared);
+ Handle<Code> code = MakeCode(lit,
+ script,
+ Handle<Context>::null(),
+ false,
+ info);
// Check for stack-overflow exception.
if (code.is_null()) {
@@ -508,24 +463,43 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
return Handle<JSFunction>::null();
}
- // Generate code and return it.
+ // Generate code and return it. The way that the compilation mode
+ // is controlled by the command-line flags is described in
+ // the static helper function MakeCode.
+ CompilationInfo info(Handle<SharedFunctionInfo>::null(),
+ Handle<Object>::null(), // No receiver.
+ 0); // Not nested in a loop.
+
+ CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
+ bool is_run_once = literal->try_full_codegen();
bool is_compiled = false;
- if (FLAG_fast_compiler && literal->try_fast_codegen()) {
- CodeGenSelector selector;
- CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
- if (code_gen == CodeGenSelector::FAST) {
- code = FastCodeGenerator::MakeCode(literal,
+ if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
+ FullCodeGenSyntaxChecker checker;
+ checker.Check(literal);
+ if (checker.has_supported_syntax()) {
+ code = FullCodeGenerator::MakeCode(literal,
script,
false); // Not eval.
is_compiled = true;
}
+ } else if (FLAG_always_fast_compiler ||
+ (FLAG_fast_compiler && !is_run_once)) {
+ // Since we are not lazily compiling we do not have a receiver to
+ // specialize for.
+ FastCodeGenSyntaxChecker checker;
+ checker.Check(literal, &info);
+ if (checker.has_supported_syntax()) {
+ code = FastCodeGenerator::MakeCode(literal, script, false, &info);
+ is_compiled = true;
+ }
}
if (!is_compiled) {
- // We didn't try the fast compiler, or we failed to select it.
+ // We fall back to the classic V8 code generator.
code = CodeGenerator::MakeCode(literal,
script,
- false); // Not eval.
+ false, // Not eval.
+ &info);
}
// Check for stack-overflow exception.
@@ -584,549 +558,8 @@ void Compiler::SetFunctionInfo(Handle<JSFunction> fun,
fun->shared()->SetThisPropertyAssignmentsInfo(
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
- fun->shared()->set_try_fast_codegen(lit->try_fast_codegen());
-}
-
-
-CodeGenSelector::CodeGenTag CodeGenSelector::Select(FunctionLiteral* fun) {
- Scope* scope = fun->scope();
-
- if (scope->num_heap_slots() > 0) {
- // We support functions with a local context if they do not have
- // parameters that need to be copied into the context.
- for (int i = 0, len = scope->num_parameters(); i < len; i++) {
- Slot* slot = scope->parameter(i)->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- if (FLAG_trace_bailout) {
- PrintF("Function has context-allocated parameters.\n");
- }
- return NORMAL;
- }
- }
- }
-
- has_supported_syntax_ = true;
- VisitDeclarations(scope->declarations());
- if (!has_supported_syntax_) return NORMAL;
-
- VisitStatements(fun->body());
- return has_supported_syntax_ ? FAST : NORMAL;
-}
-
-
-#define BAILOUT(reason) \
- do { \
- if (FLAG_trace_bailout) { \
- PrintF("%s\n", reason); \
- } \
- has_supported_syntax_ = false; \
- return; \
- } while (false)
-
-
-#define CHECK_BAILOUT \
- do { \
- if (!has_supported_syntax_) return; \
- } while (false)
-
-
-void CodeGenSelector::VisitDeclarations(ZoneList<Declaration*>* decls) {
- for (int i = 0; i < decls->length(); i++) {
- Visit(decls->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void CodeGenSelector::VisitStatements(ZoneList<Statement*>* stmts) {
- for (int i = 0, len = stmts->length(); i < len; i++) {
- Visit(stmts->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void CodeGenSelector::VisitDeclaration(Declaration* decl) {
- Property* prop = decl->proxy()->AsProperty();
- if (prop != NULL) {
- ProcessExpression(prop->obj(), Expression::kValue);
- ProcessExpression(prop->key(), Expression::kValue);
- }
-
- if (decl->fun() != NULL) {
- ProcessExpression(decl->fun(), Expression::kValue);
- }
-}
-
-
-void CodeGenSelector::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void CodeGenSelector::VisitExpressionStatement(ExpressionStatement* stmt) {
- ProcessExpression(stmt->expression(), Expression::kEffect);
-}
-
-
-void CodeGenSelector::VisitEmptyStatement(EmptyStatement* stmt) {
- // EmptyStatement is supported.
-}
-
-
-void CodeGenSelector::VisitIfStatement(IfStatement* stmt) {
- ProcessExpression(stmt->condition(), Expression::kTest);
- CHECK_BAILOUT;
- Visit(stmt->then_statement());
- CHECK_BAILOUT;
- Visit(stmt->else_statement());
-}
-
-
-void CodeGenSelector::VisitContinueStatement(ContinueStatement* stmt) {
-}
-
-
-void CodeGenSelector::VisitBreakStatement(BreakStatement* stmt) {
-}
-
-
-void CodeGenSelector::VisitReturnStatement(ReturnStatement* stmt) {
- ProcessExpression(stmt->expression(), Expression::kValue);
-}
-
-
-void CodeGenSelector::VisitWithEnterStatement(WithEnterStatement* stmt) {
- ProcessExpression(stmt->expression(), Expression::kValue);
-}
-
-
-void CodeGenSelector::VisitWithExitStatement(WithExitStatement* stmt) {
- // Supported.
-}
-
-
-void CodeGenSelector::VisitSwitchStatement(SwitchStatement* stmt) {
- BAILOUT("SwitchStatement");
-}
-
-
-void CodeGenSelector::VisitDoWhileStatement(DoWhileStatement* stmt) {
- // We do not handle loops with breaks or continue statements in their
- // body. We will bailout when we hit those statements in the body.
- ProcessExpression(stmt->cond(), Expression::kTest);
- CHECK_BAILOUT;
- Visit(stmt->body());
-}
-
-
-void CodeGenSelector::VisitWhileStatement(WhileStatement* stmt) {
- // We do not handle loops with breaks or continue statements in their
- // body. We will bailout when we hit those statements in the body.
- ProcessExpression(stmt->cond(), Expression::kTest);
- CHECK_BAILOUT;
- Visit(stmt->body());
-}
-
-
-void CodeGenSelector::VisitForStatement(ForStatement* stmt) {
- BAILOUT("ForStatement");
-}
-
-
-void CodeGenSelector::VisitForInStatement(ForInStatement* stmt) {
- BAILOUT("ForInStatement");
-}
-
-
-void CodeGenSelector::VisitTryCatchStatement(TryCatchStatement* stmt) {
- Visit(stmt->try_block());
- CHECK_BAILOUT;
- Visit(stmt->catch_block());
-}
-
-
-void CodeGenSelector::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- Visit(stmt->try_block());
- CHECK_BAILOUT;
- Visit(stmt->finally_block());
-}
-
-
-void CodeGenSelector::VisitDebuggerStatement(DebuggerStatement* stmt) {
- // Debugger statement is supported.
-}
-
-
-void CodeGenSelector::VisitFunctionLiteral(FunctionLiteral* expr) {
- // Function literal is supported.
-}
-
-
-void CodeGenSelector::VisitFunctionBoilerplateLiteral(
- FunctionBoilerplateLiteral* expr) {
- BAILOUT("FunctionBoilerplateLiteral");
-}
-
-
-void CodeGenSelector::VisitConditional(Conditional* expr) {
- ProcessExpression(expr->condition(), Expression::kTest);
- CHECK_BAILOUT;
- ProcessExpression(expr->then_expression(), context_);
- CHECK_BAILOUT;
- ProcessExpression(expr->else_expression(), context_);
-}
-
-
-void CodeGenSelector::VisitSlot(Slot* expr) {
- UNREACHABLE();
-}
-
-
-void CodeGenSelector::VisitVariableProxy(VariableProxy* expr) {
- Expression* rewrite = expr->var()->rewrite();
- // A rewrite of NULL indicates a global variable.
- if (rewrite != NULL) {
- // Non-global.
- Slot* slot = rewrite->AsSlot();
- if (slot != NULL) {
- Slot::Type type = slot->type();
- // When LOOKUP slots are enabled, some currently dead code
- // implementing unary typeof will become live.
- if (type == Slot::LOOKUP) {
- BAILOUT("Lookup slot");
- }
- } else {
-#ifdef DEBUG
- // Only remaining possibility is a property where the object is
- // a slotted variable and the key is a smi.
- Property* property = rewrite->AsProperty();
- ASSERT_NOT_NULL(property);
- Variable* object = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object);
- ASSERT_NOT_NULL(object->slot());
- ASSERT_NOT_NULL(property->key()->AsLiteral());
- ASSERT(property->key()->AsLiteral()->handle()->IsSmi());
-#endif
- }
- }
-}
-
-
-void CodeGenSelector::VisitLiteral(Literal* expr) {
- /* Nothing to do. */
-}
-
-
-void CodeGenSelector::VisitRegExpLiteral(RegExpLiteral* expr) {
- /* Nothing to do. */
-}
-
-
-void CodeGenSelector::VisitObjectLiteral(ObjectLiteral* expr) {
- ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
-
- for (int i = 0, len = properties->length(); i < len; i++) {
- ObjectLiteral::Property* property = properties->at(i);
- if (property->IsCompileTimeValue()) continue;
-
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- UNREACHABLE();
-
- // For (non-compile-time) materialized literals and computed
- // properties with symbolic keys we will use an IC and therefore not
- // generate code for the key.
- case ObjectLiteral::Property::COMPUTED: // Fall through.
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- if (property->key()->handle()->IsSymbol()) {
- break;
- }
- // Fall through.
-
- // In all other cases we need the key's value on the stack
- // for a runtime call. (Relies on TEMP meaning STACK.)
- case ObjectLiteral::Property::GETTER: // Fall through.
- case ObjectLiteral::Property::SETTER: // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
- ProcessExpression(property->key(), Expression::kValue);
- CHECK_BAILOUT;
- break;
- }
- ProcessExpression(property->value(), Expression::kValue);
- CHECK_BAILOUT;
- }
-}
-
-
-void CodeGenSelector::VisitArrayLiteral(ArrayLiteral* expr) {
- ZoneList<Expression*>* subexprs = expr->values();
- for (int i = 0, len = subexprs->length(); i < len; i++) {
- Expression* subexpr = subexprs->at(i);
- if (subexpr->AsLiteral() != NULL) continue;
- if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
- ProcessExpression(subexpr, Expression::kValue);
- CHECK_BAILOUT;
- }
-}
-
-
-void CodeGenSelector::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- ProcessExpression(expr->key(), Expression::kValue);
- CHECK_BAILOUT;
- ProcessExpression(expr->value(), Expression::kValue);
-}
-
-
-void CodeGenSelector::VisitAssignment(Assignment* expr) {
- // We support plain non-compound assignments to properties, parameters and
- // non-context (stack-allocated) locals, and global variables.
- Token::Value op = expr->op();
- if (op == Token::INIT_CONST) BAILOUT("initialize constant");
-
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- Property* prop = expr->target()->AsProperty();
- ASSERT(var == NULL || prop == NULL);
- if (var != NULL) {
- if (var->mode() == Variable::CONST) {
- BAILOUT("Assignment to const");
- }
- // All global variables are supported.
- if (!var->is_global()) {
- ASSERT(var->slot() != NULL);
- Slot::Type type = var->slot()->type();
- if (type == Slot::LOOKUP) {
- BAILOUT("Lookup slot");
- }
- }
- } else if (prop != NULL) {
- ProcessExpression(prop->obj(), Expression::kValue);
- CHECK_BAILOUT;
- // We will only visit the key during code generation for keyed property
- // stores. Leave its expression context uninitialized for named
- // property stores.
- if (!prop->key()->IsPropertyName()) {
- ProcessExpression(prop->key(), Expression::kValue);
- CHECK_BAILOUT;
- }
- } else {
- // This is a throw reference error.
- BAILOUT("non-variable/non-property assignment");
- }
-
- ProcessExpression(expr->value(), Expression::kValue);
-}
-
-
-void CodeGenSelector::VisitThrow(Throw* expr) {
- ProcessExpression(expr->exception(), Expression::kValue);
-}
-
-
-void CodeGenSelector::VisitProperty(Property* expr) {
- ProcessExpression(expr->obj(), Expression::kValue);
- CHECK_BAILOUT;
- ProcessExpression(expr->key(), Expression::kValue);
-}
-
-
-void CodeGenSelector::VisitCall(Call* expr) {
- Expression* fun = expr->expression();
- ZoneList<Expression*>* args = expr->arguments();
- Variable* var = fun->AsVariableProxy()->AsVariable();
-
- // Check for supported calls
- if (var != NULL && var->is_possibly_eval()) {
- BAILOUT("call to the identifier 'eval'");
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // Calls to global variables are supported.
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
- BAILOUT("call to a lookup slot");
- } else if (fun->AsProperty() != NULL) {
- Property* prop = fun->AsProperty();
- Literal* literal_key = prop->key()->AsLiteral();
- if (literal_key != NULL && literal_key->handle()->IsSymbol()) {
- ProcessExpression(prop->obj(), Expression::kValue);
- CHECK_BAILOUT;
- } else {
- ProcessExpression(prop->obj(), Expression::kValue);
- CHECK_BAILOUT;
- ProcessExpression(prop->key(), Expression::kValue);
- CHECK_BAILOUT;
- }
- } else {
- // Otherwise the call is supported if the function expression is.
- ProcessExpression(fun, Expression::kValue);
- }
- // Check all arguments to the call.
- for (int i = 0; i < args->length(); i++) {
- ProcessExpression(args->at(i), Expression::kValue);
- CHECK_BAILOUT;
- }
-}
-
-
-void CodeGenSelector::VisitCallNew(CallNew* expr) {
- ProcessExpression(expr->expression(), Expression::kValue);
- CHECK_BAILOUT;
- ZoneList<Expression*>* args = expr->arguments();
- // Check all arguments to the call
- for (int i = 0; i < args->length(); i++) {
- ProcessExpression(args->at(i), Expression::kValue);
- CHECK_BAILOUT;
- }
-}
-
-
-void CodeGenSelector::VisitCallRuntime(CallRuntime* expr) {
- // Check for inline runtime call
- if (expr->name()->Get(0) == '_' &&
- CodeGenerator::FindInlineRuntimeLUT(expr->name()) != NULL) {
- BAILOUT("inlined runtime call");
- }
- // Check all arguments to the call. (Relies on TEMP meaning STACK.)
- for (int i = 0; i < expr->arguments()->length(); i++) {
- ProcessExpression(expr->arguments()->at(i), Expression::kValue);
- CHECK_BAILOUT;
- }
+ fun->shared()->set_try_full_codegen(lit->try_full_codegen());
}
-void CodeGenSelector::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::VOID:
- ProcessExpression(expr->expression(), Expression::kEffect);
- break;
- case Token::NOT:
- ProcessExpression(expr->expression(), Expression::kTest);
- break;
- case Token::TYPEOF:
- ProcessExpression(expr->expression(), Expression::kValue);
- break;
- default:
- BAILOUT("UnaryOperation");
- }
-}
-
-
-void CodeGenSelector::VisitCountOperation(CountOperation* expr) {
- Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- Property* prop = expr->expression()->AsProperty();
- ASSERT(var == NULL || prop == NULL);
- if (var != NULL) {
- // All global variables are supported.
- if (!var->is_global()) {
- ASSERT(var->slot() != NULL);
- Slot::Type type = var->slot()->type();
- if (type == Slot::LOOKUP) {
- BAILOUT("CountOperation with lookup slot");
- }
- }
- } else if (prop != NULL) {
- ProcessExpression(prop->obj(), Expression::kValue);
- CHECK_BAILOUT;
- // We will only visit the key during code generation for keyed property
- // stores. Leave its expression context uninitialized for named
- // property stores.
- if (!prop->key()->IsPropertyName()) {
- ProcessExpression(prop->key(), Expression::kValue);
- CHECK_BAILOUT;
- }
- } else {
- // This is a throw reference error.
- BAILOUT("CountOperation non-variable/non-property expression");
- }
-}
-
-
-void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {
- switch (expr->op()) {
- case Token::COMMA:
- ProcessExpression(expr->left(), Expression::kEffect);
- CHECK_BAILOUT;
- ProcessExpression(expr->right(), context_);
- break;
-
- case Token::OR:
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect: // Fall through.
- case Expression::kTest: // Fall through.
- case Expression::kTestValue:
- // The left subexpression's value is not needed, it is in a pure
- // test context.
- ProcessExpression(expr->left(), Expression::kTest);
- break;
- case Expression::kValue: // Fall through.
- case Expression::kValueTest:
- // The left subexpression's value is needed, it is in a hybrid
- // value/test context.
- ProcessExpression(expr->left(), Expression::kValueTest);
- break;
- }
- CHECK_BAILOUT;
- ProcessExpression(expr->right(), context_);
- break;
-
- case Token::AND:
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect: // Fall through.
- case Expression::kTest: // Fall through.
- case Expression::kValueTest:
- // The left subexpression's value is not needed, it is in a pure
- // test context.
- ProcessExpression(expr->left(), Expression::kTest);
- break;
- case Expression::kValue: // Fall through.
- case Expression::kTestValue:
- // The left subexpression's value is needed, it is in a hybrid
- // test/value context.
- ProcessExpression(expr->left(), Expression::kTestValue);
- break;
- }
- CHECK_BAILOUT;
- ProcessExpression(expr->right(), context_);
- break;
-
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD:
- case Token::MUL:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- ProcessExpression(expr->left(), Expression::kValue);
- CHECK_BAILOUT;
- ProcessExpression(expr->right(), Expression::kValue);
- break;
-
- default:
- BAILOUT("Unsupported binary operation");
- }
-}
-
-
-void CodeGenSelector::VisitCompareOperation(CompareOperation* expr) {
- ProcessExpression(expr->left(), Expression::kValue);
- CHECK_BAILOUT;
- ProcessExpression(expr->right(), Expression::kValue);
-}
-
-
-void CodeGenSelector::VisitThisFunction(ThisFunction* expr) {
- // ThisFunction is supported.
-}
-
-#undef BAILOUT
-#undef CHECK_BAILOUT
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 546e446b9..19499de71 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -35,6 +35,41 @@
namespace v8 {
namespace internal {
+// CompilationInfo encapsulates some information known at compile time.
+class CompilationInfo BASE_EMBEDDED {
+ public:
+ CompilationInfo(Handle<SharedFunctionInfo> shared_info,
+ Handle<Object> receiver,
+ int loop_nesting)
+ : shared_info_(shared_info),
+ receiver_(receiver),
+ loop_nesting_(loop_nesting),
+ has_this_properties_(false),
+ has_globals_(false) {
+ }
+
+ Handle<SharedFunctionInfo> shared_info() { return shared_info_; }
+
+ bool has_receiver() { return !receiver_.is_null(); }
+ Handle<Object> receiver() { return receiver_; }
+
+ int loop_nesting() { return loop_nesting_; }
+
+ bool has_this_properties() { return has_this_properties_; }
+ void set_has_this_properties(bool flag) { has_this_properties_ = flag; }
+
+ bool has_globals() { return has_globals_; }
+ void set_has_globals(bool flag) { has_globals_ = flag; }
+
+ private:
+ Handle<SharedFunctionInfo> shared_info_;
+ Handle<Object> receiver_;
+ int loop_nesting_;
+ bool has_this_properties_;
+ bool has_globals_;
+};
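
CompilationInfo is a plain value object threaded through the compile pipeline;
eager call sites pass null handles and zero loop nesting, as both constructor
uses earlier in this diff do. A hedged usage sketch with plain pointers
standing in for V8 Handles (the stand-in types are illustrative):

    #include <cstdio>

    struct SharedFunctionInfo { };
    struct Object { };

    // Same shape as the class above, with pointers instead of Handles.
    class CompilationInfo {
     public:
      CompilationInfo(SharedFunctionInfo* shared, Object* receiver,
                      int loop_nesting)
          : shared_(shared), receiver_(receiver), loop_nesting_(loop_nesting) { }

      bool has_receiver() const { return receiver_ != NULL; }
      int loop_nesting() const { return loop_nesting_; }
      SharedFunctionInfo* shared_info() const { return shared_; }

     private:
      SharedFunctionInfo* shared_;
      Object* receiver_;
      int loop_nesting_;
    };

    int main() {
      // Eager compilation: no shared info, no receiver to specialize for,
      // not nested in a loop.
      CompilationInfo info(NULL, NULL, 0);
      std::printf("receiver? %d  loop nesting %d\n",
                  info.has_receiver(), info.loop_nesting());
      return 0;
    }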
+
+
// The V8 compiler
//
// General strategy: Source code is translated into an anonymous function w/o
@@ -70,7 +105,7 @@ class Compiler : public AllStatic {
// Compile from function info (used for lazy compilation). Returns
// true on success and false if the compilation resulted in a stack
// overflow.
- static bool CompileLazy(Handle<SharedFunctionInfo> shared, int loop_nesting);
+ static bool CompileLazy(CompilationInfo* info);
// Compile a function boilerplate object (the function is possibly
// lazily compiled). Called recursively from a backend code
diff --git a/deps/v8/src/data-flow.cc b/deps/v8/src/data-flow.cc
new file mode 100644
index 000000000..0e30b3151
--- /dev/null
+++ b/deps/v8/src/data-flow.cc
@@ -0,0 +1,267 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "data-flow.h"
+
+namespace v8 {
+namespace internal {
+
+
+void AstLabeler::Label(FunctionLiteral* fun) {
+ VisitStatements(fun->body());
+}
+
+
+void AstLabeler::VisitStatements(ZoneList<Statement*>* stmts) {
+ for (int i = 0, len = stmts->length(); i < len; i++) {
+ Visit(stmts->at(i));
+ }
+}
+
+
+void AstLabeler::VisitDeclarations(ZoneList<Declaration*>* decls) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitBlock(Block* stmt) {
+ VisitStatements(stmt->statements());
+}
+
+
+void AstLabeler::VisitExpressionStatement(
+ ExpressionStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void AstLabeler::VisitEmptyStatement(EmptyStatement* stmt) {
+ // Do nothing.
+}
+
+
+void AstLabeler::VisitIfStatement(IfStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitContinueStatement(ContinueStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitBreakStatement(BreakStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitReturnStatement(ReturnStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitWithEnterStatement(
+ WithEnterStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitWithExitStatement(WithExitStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitSwitchStatement(SwitchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitWhileStatement(WhileStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitForStatement(ForStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitForInStatement(ForInStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitTryFinallyStatement(
+ TryFinallyStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitDebuggerStatement(
+ DebuggerStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitFunctionLiteral(FunctionLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitConditional(Conditional* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitSlot(Slot* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitVariableProxy(VariableProxy* expr) {
+ expr->set_num(next_number_++);
+}
+
+
+void AstLabeler::VisitLiteral(Literal* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitRegExpLiteral(RegExpLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitObjectLiteral(ObjectLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitArrayLiteral(ArrayLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitCatchExtensionObject(
+ CatchExtensionObject* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitAssignment(Assignment* expr) {
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ if (prop != NULL) {
+ ASSERT(prop->key()->IsPropertyName());
+ VariableProxy* proxy = prop->obj()->AsVariableProxy();
+ if (proxy != NULL && proxy->var()->is_this()) {
+ has_this_properties_ = true;
+ } else {
+ Visit(prop->obj());
+ }
+ }
+ Visit(expr->value());
+ expr->set_num(next_number_++);
+}
+
+
+void AstLabeler::VisitThrow(Throw* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitProperty(Property* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitCall(Call* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitCallNew(CallNew* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitCallRuntime(CallRuntime* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitUnaryOperation(UnaryOperation* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitCountOperation(CountOperation* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitBinaryOperation(BinaryOperation* expr) {
+ Visit(expr->left());
+ Visit(expr->right());
+ expr->set_num(next_number_++);
+}
+
+
+void AstLabeler::VisitCompareOperation(CompareOperation* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitThisFunction(ThisFunction* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitDeclaration(Declaration* decl) {
+ UNREACHABLE();
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/data-flow.h b/deps/v8/src/data-flow.h
new file mode 100644
index 000000000..ac8350318
--- /dev/null
+++ b/deps/v8/src/data-flow.h
@@ -0,0 +1,67 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DATAFLOW_H_
+#define V8_DATAFLOW_H_
+
+#include "ast.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// This class is used to number all expressions in the AST according to
+// their evaluation order (post-order left-to-right traversal).
+class AstLabeler: public AstVisitor {
+ public:
+ AstLabeler() : next_number_(0), has_this_properties_(false) {}
+
+ void Label(FunctionLiteral* fun);
+
+ bool has_this_properties() { return has_this_properties_; }
+
+ private:
+ void VisitDeclarations(ZoneList<Declaration*>* decls);
+ void VisitStatements(ZoneList<Statement*>* stmts);
+
+ // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ // Traversal number for labelling AST nodes.
+ int next_number_;
+
+ bool has_this_properties_;
+
+ DISALLOW_COPY_AND_ASSIGN(AstLabeler);
+};
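
The numbering is post-order and left-to-right: operands are labeled before the
expression that consumes them, so the numbers follow evaluation order. A tiny
standalone illustration on a generic binary-expression tree (not V8's AST
types):

    #include <cstdio>

    struct Expr {
      Expr* left;
      Expr* right;
      int num;  // filled in by Label()
    };

    // Children first, then the node itself: exactly the order in which the
    // operands would be evaluated.
    static int next_number = 0;
    void Label(Expr* e) {
      if (e == NULL) return;
      Label(e->left);
      Label(e->right);
      e->num = next_number++;
    }

    int main() {
      // (a + b) * c  numbers as  a=0, b=1, +=2, c=3, *=4.
      Expr a = { NULL, NULL, -1 };
      Expr b = { NULL, NULL, -1 };
      Expr c = { NULL, NULL, -1 };
      Expr plus = { &a, &b, -1 };
      Expr times = { &plus, &c, -1 };
      Label(&times);
      std::printf("+ is %d, * is %d\n", plus.num, times.num);
      return 0;
    }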
+
+
+} } // namespace v8::internal
+
+#endif // V8_DATAFLOW_H_
diff --git a/deps/v8/src/dateparser.cc b/deps/v8/src/dateparser.cc
index 1cc9aa169..51a63e1a0 100644
--- a/deps/v8/src/dateparser.cc
+++ b/deps/v8/src/dateparser.cc
@@ -72,15 +72,9 @@ bool DateParser::DayComposer::Write(FixedArray* output) {
if (!Smi::IsValid(year) || !IsMonth(month) || !IsDay(day)) return false;
- output->set(YEAR,
- Smi::FromInt(year),
- SKIP_WRITE_BARRIER);
- output->set(MONTH,
- Smi::FromInt(month - 1),
- SKIP_WRITE_BARRIER); // 0-based
- output->set(DAY,
- Smi::FromInt(day),
- SKIP_WRITE_BARRIER);
+ output->set(YEAR, Smi::FromInt(year));
+ output->set(MONTH, Smi::FromInt(month - 1)); // 0-based
+ output->set(DAY, Smi::FromInt(day));
return true;
}
@@ -103,15 +97,9 @@ bool DateParser::TimeComposer::Write(FixedArray* output) {
if (!IsHour(hour) || !IsMinute(minute) || !IsSecond(second)) return false;
- output->set(HOUR,
- Smi::FromInt(hour),
- SKIP_WRITE_BARRIER);
- output->set(MINUTE,
- Smi::FromInt(minute),
- SKIP_WRITE_BARRIER);
- output->set(SECOND,
- Smi::FromInt(second),
- SKIP_WRITE_BARRIER);
+ output->set(HOUR, Smi::FromInt(hour));
+ output->set(MINUTE, Smi::FromInt(minute));
+ output->set(SECOND, Smi::FromInt(second));
return true;
}
@@ -121,13 +109,9 @@ bool DateParser::TimeZoneComposer::Write(FixedArray* output) {
if (minute_ == kNone) minute_ = 0;
int total_seconds = sign_ * (hour_ * 3600 + minute_ * 60);
if (!Smi::IsValid(total_seconds)) return false;
- output->set(UTC_OFFSET,
- Smi::FromInt(total_seconds),
- SKIP_WRITE_BARRIER);
+ output->set(UTC_OFFSET, Smi::FromInt(total_seconds));
} else {
- output->set(UTC_OFFSET,
- Heap::null_value(),
- SKIP_WRITE_BARRIER);
+ output->set_null(UTC_OFFSET);
}
return true;
}
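
The dropped SKIP_WRITE_BARRIER arguments suggest the FixedArray setter now
chooses the barrier mode itself: storing a Smi never needs the write barrier,
because Smis are immediate tagged integers rather than heap pointers the
collector has to track. A rough sketch of that kind of conditional barrier
(every type and function here is illustrative):

    #include <cstdio>

    struct Value { bool is_heap_pointer; };

    // Only pointer stores must be recorded for the garbage collector; stores
    // of immediate values (like Smis) can skip the write barrier entirely.
    void Set(Value* slot, Value v) {
      *slot = v;
      if (v.is_heap_pointer) {
        std::printf("write barrier recorded\n");
      }
    }

    int main() {
      Value slot = { false };
      Value smi_like = { false };
      Value pointer_like = { true };
      Set(&slot, smi_like);      // Smi store: no barrier
      Set(&slot, pointer_like);  // pointer store: barrier
      return 0;
    }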
diff --git a/deps/v8/src/debug-agent.cc b/deps/v8/src/debug-agent.cc
index 070138254..41151d807 100644
--- a/deps/v8/src/debug-agent.cc
+++ b/deps/v8/src/debug-agent.cc
@@ -54,10 +54,12 @@ void DebuggerAgent::Run() {
while (!bound && !terminate_) {
bound = server_->Bind(port_);
- // If an error occoured wait a bit before retrying. The most common error
+ // If an error occurred wait a bit before retrying. The most common error
// would be that the port is already in use so this avoids a busy loop and
makes the agent take over the port when it becomes free.
if (!bound) {
+ PrintF("Failed to open socket on port %d, "
+ "waiting %d ms before retrying\n", port_, kOneSecondInMicros / 1000);
terminate_now_->Wait(kOneSecondInMicros);
}
}
diff --git a/deps/v8/src/debug-delay.js b/deps/v8/src/debug-delay.js
index 04fde1f99..14d8c8830 100644
--- a/deps/v8/src/debug-delay.js
+++ b/deps/v8/src/debug-delay.js
@@ -1704,7 +1704,7 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
if (global) {
// Evaluate in the global context.
response.body =
- this.exec_state_.evaluateGlobal(expression), Boolean(disable_break);
+ this.exec_state_.evaluateGlobal(expression, Boolean(disable_break));
return;
}
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index fbe09391e..fb9b23eb5 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -75,9 +75,6 @@ BreakLocationIterator::BreakLocationIterator(Handle<DebugInfo> debug_info,
BreakLocatorType type) {
debug_info_ = debug_info;
type_ = type;
- // Get the stub early to avoid possible GC during iterations. We may need
- // this stub to detect debugger calls generated from debugger statements.
- debug_break_stub_ = RuntimeStub(Runtime::kDebugBreak, 0).GetCode();
reloc_iterator_ = NULL;
reloc_iterator_original_ = NULL;
Reset(); // Initialize the rest of the member variables.
@@ -461,9 +458,7 @@ bool BreakLocationIterator::IsDebuggerStatement() {
Code* code = Code::GetCodeFromTargetAddress(target);
if (code->kind() == Code::STUB) {
CodeStub::Major major_key = code->major_key();
- if (major_key == CodeStub::Runtime) {
- return (*debug_break_stub_ == code);
- }
+ return (major_key == CodeStub::DebuggerStatement);
}
}
return false;
@@ -1241,12 +1236,14 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
uint32_t key = Smi::cast(*obj)->value();
// Argc in the stub is the number of arguments passed - not the
// expected arguments of the called function.
- int call_function_arg_count = CodeStub::MinorKeyFromKey(key);
+ int call_function_arg_count =
+ CallFunctionStub::ExtractArgcFromMinorKey(
+ CodeStub::MinorKeyFromKey(key));
ASSERT(call_function_stub->major_key() ==
CodeStub::MajorKeyFromKey(key));
// Find target function on the expression stack.
- // Expression stack lools like this (top to bottom):
+ // Expression stack looks like this (top to bottom):
// argN
// ...
// arg0
@@ -1524,19 +1521,13 @@ void Debug::ClearStepNext() {
}
-bool Debug::EnsureCompiled(Handle<SharedFunctionInfo> shared) {
- if (shared->is_compiled()) return true;
- return CompileLazyShared(shared, CLEAR_EXCEPTION, 0);
-}
-
-
// Ensures the debug information is present for shared.
bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
// Return if we already have the debug info for shared.
if (HasDebugInfo(shared)) return true;
// Ensure shared is compiled. Return false if this failed.
- if (!EnsureCompiled(shared)) return false;
+ if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
// Create the debug info object.
Handle<DebugInfo> debug_info = Factory::NewDebugInfo(shared);
@@ -1693,9 +1684,7 @@ void Debug::CreateScriptCache() {
// Scan heap for Script objects.
int count = 0;
HeapIterator iterator;
- while (iterator.has_next()) {
- HeapObject* obj = iterator.next();
- ASSERT(obj != NULL);
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
script_cache_->Add(Handle<Script>(Script::cast(obj)));
count++;
@@ -1759,8 +1748,10 @@ bool Debugger::never_unload_debugger_ = false;
v8::Debug::MessageHandler2 Debugger::message_handler_ = NULL;
bool Debugger::debugger_unload_pending_ = false;
v8::Debug::HostDispatchHandler Debugger::host_dispatch_handler_ = NULL;
+Mutex* Debugger::dispatch_handler_access_ = OS::CreateMutex();
v8::Debug::DebugMessageDispatchHandler
Debugger::debug_message_dispatch_handler_ = NULL;
+MessageDispatchHelperThread* Debugger::message_dispatch_helper_thread_ = NULL;
int Debugger::host_dispatch_micros_ = 100 * 1000;
DebuggerAgent* Debugger::agent_ = NULL;
LockingCommandMessageQueue Debugger::command_queue_(kQueueInitialSize);
@@ -2379,17 +2370,12 @@ void Debugger::ListenersChanged() {
if (IsDebuggerActive()) {
// Disable the compilation cache when the debugger is active.
CompilationCache::Disable();
+ debugger_unload_pending_ = false;
} else {
CompilationCache::Enable();
-
// Unload the debugger if event listener and message handler cleared.
- if (Debug::InDebugger()) {
- // If we are in debugger set the flag to unload the debugger when last
- // EnterDebugger on the current stack is destroyed.
- debugger_unload_pending_ = true;
- } else {
- UnloadDebugger();
- }
+ // Schedule this for later, because we may be in a non-V8 thread.
+ debugger_unload_pending_ = true;
}
}
@@ -2402,8 +2388,14 @@ void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
void Debugger::SetDebugMessageDispatchHandler(
- v8::Debug::DebugMessageDispatchHandler handler) {
+ v8::Debug::DebugMessageDispatchHandler handler, bool provide_locker) {
+ ScopedLock with(dispatch_handler_access_);
debug_message_dispatch_handler_ = handler;
+
+ if (provide_locker && message_dispatch_helper_thread_ == NULL) {
+ message_dispatch_helper_thread_ = new MessageDispatchHelperThread;
+ message_dispatch_helper_thread_->Start();
+ }
}
@@ -2438,8 +2430,16 @@ void Debugger::ProcessCommand(Vector<const uint16_t> command,
StackGuard::DebugCommand();
}
- if (Debugger::debug_message_dispatch_handler_ != NULL) {
- Debugger::debug_message_dispatch_handler_();
+ MessageDispatchHelperThread* dispatch_thread;
+ {
+ ScopedLock with(dispatch_handler_access_);
+ dispatch_thread = message_dispatch_helper_thread_;
+ }
+
+ if (dispatch_thread == NULL) {
+ CallMessageDispatchHandler();
+ } else {
+ dispatch_thread->Schedule();
}
}
@@ -2526,6 +2526,19 @@ void Debugger::WaitForAgent() {
agent_->WaitUntilListening();
}
+
+void Debugger::CallMessageDispatchHandler() {
+ v8::Debug::DebugMessageDispatchHandler handler;
+ {
+ ScopedLock with(dispatch_handler_access_);
+ handler = Debugger::debug_message_dispatch_handler_;
+ }
+ if (handler != NULL) {
+ handler();
+ }
+}
+
+
MessageImpl MessageImpl::NewEvent(DebugEvent event,
bool running,
Handle<JSObject> exec_state,
@@ -2746,6 +2759,45 @@ void LockingCommandMessageQueue::Clear() {
queue_.Clear();
}
+
+MessageDispatchHelperThread::MessageDispatchHelperThread()
+ : sem_(OS::CreateSemaphore(0)), mutex_(OS::CreateMutex()),
+ already_signalled_(false) {
+}
+
+
+MessageDispatchHelperThread::~MessageDispatchHelperThread() {
+ delete mutex_;
+ delete sem_;
+}
+
+
+void MessageDispatchHelperThread::Schedule() {
+ {
+ ScopedLock lock(mutex_);
+ if (already_signalled_) {
+ return;
+ }
+ already_signalled_ = true;
+ }
+ sem_->Signal();
+}
+
+
+void MessageDispatchHelperThread::Run() {
+ while (true) {
+ sem_->Wait();
+ {
+ ScopedLock lock(mutex_);
+ already_signalled_ = false;
+ }
+ {
+ Locker locker;
+ Debugger::CallMessageDispatchHandler();
+ }
+ }
+}
+
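
Schedule() and Run() implement wake-up coalescing: the already_signalled_ flag
allows at most one pending signal, so a burst of ProcessCommand calls wakes the
helper thread once, and it then takes the Locker and invokes the dispatch
handler a single time. A sketch of the same pattern using standard C++ threads
in place of V8's OS primitives (a one-shot demonstration; the real thread
loops forever):

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    std::mutex mu;
    std::condition_variable cv;
    bool signalled = false;

    void Schedule() {
      std::lock_guard<std::mutex> lock(mu);
      if (signalled) return;  // a wake-up is already pending; coalesce
      signalled = true;
      cv.notify_one();
    }

    void HelperThread() {
      std::unique_lock<std::mutex> lock(mu);
      cv.wait(lock, [] { return signalled; });
      signalled = false;
      lock.unlock();
      std::printf("dispatch handler invoked once for the whole burst\n");
    }

    int main() {
      std::thread t(HelperThread);
      Schedule();  // a burst of three requests...
      Schedule();
      Schedule();  // ...collapses into a single wake-up
      t.join();
      return 0;
    }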
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index c37e08b38..cab9e8e44 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -132,7 +132,6 @@ class BreakLocationIterator {
int position_;
int statement_position_;
Handle<DebugInfo> debug_info_;
- Handle<Code> debug_break_stub_;
RelocIterator* reloc_iterator_;
RelocIterator* reloc_iterator_original_;
@@ -391,7 +390,6 @@ class Debug {
static void ClearStepOut();
static void ClearStepNext();
// Returns whether the compile succeeded.
- static bool EnsureCompiled(Handle<SharedFunctionInfo> shared);
static void RemoveDebugInfo(Handle<DebugInfo> debug_info);
static void SetAfterBreakTarget(JavaScriptFrame* frame);
static Handle<Object> CheckBreakPoints(Handle<Object> break_point);
@@ -559,6 +557,9 @@ class CommandMessageQueue BASE_EMBEDDED {
};
+class MessageDispatchHelperThread;
+
+
// LockingCommandMessageQueue is a thread-safe circular buffer of CommandMessage
// messages. The message data is not managed by LockingCommandMessageQueue.
// Pointers to the data are passed in and out. Implemented by adding a
@@ -619,7 +620,8 @@ class Debugger {
static void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
int period);
static void SetDebugMessageDispatchHandler(
- v8::Debug::DebugMessageDispatchHandler handler);
+ v8::Debug::DebugMessageDispatchHandler handler,
+ bool provide_locker);
// Invoke the message handler function.
static void InvokeMessageHandler(MessageImpl message);
@@ -645,6 +647,8 @@ class Debugger {
// Blocks until the agent has started listening for connections
static void WaitForAgent();
+ static void CallMessageDispatchHandler();
+
// Unload the debugger if possible. Only called when no debugger is currently
// active.
static void UnloadDebugger();
@@ -654,7 +658,9 @@ class Debugger {
// Check whether the message handler has been cleared.
if (debugger_unload_pending_) {
- UnloadDebugger();
+ if (Debug::debugger_entry() == NULL) {
+ UnloadDebugger();
+ }
}
// Currently argument event is not used.
@@ -681,7 +687,9 @@ class Debugger {
static v8::Debug::MessageHandler2 message_handler_;
static bool debugger_unload_pending_; // Was message handler cleared?
static v8::Debug::HostDispatchHandler host_dispatch_handler_;
+ static Mutex* dispatch_handler_access_; // Mutex guarding dispatch handler.
static v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_;
+ static MessageDispatchHelperThread* message_dispatch_helper_thread_;
static int host_dispatch_micros_;
static DebuggerAgent* agent_;
@@ -858,6 +866,27 @@ class Debug_Address {
int reg_;
};
+// The optional thread that the Debug Agent may use to temporarily call V8 to
+// process pending debug requests if the debuggee is not running V8 at the
+// moment. Technically it does not call V8 itself; rather, it asks the
+// embedding program to do so via v8::Debug::DebugMessageDispatchHandler.
+class MessageDispatchHelperThread: public Thread {
+ public:
+ MessageDispatchHelperThread();
+ ~MessageDispatchHelperThread();
+
+ void Schedule();
+
+ private:
+ void Run();
+
+ Semaphore* const sem_;
+ Mutex* const mutex_;
+ bool already_signalled_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessageDispatchHelperThread);
+};
+
} } // namespace v8::internal
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 524dbe671..50f3eb996 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -266,13 +266,7 @@ static int DecodeIt(FILE* f,
case CodeStub::CallFunction:
out.AddFormatted("argc = %d", minor_key);
break;
- case CodeStub::Runtime: {
- const char* name =
- RuntimeStub::GetNameFromMinorKey(minor_key);
- out.AddFormatted("%s", name);
- break;
- }
- default:
+ default:
out.AddFormatted("minor: %d", minor_key);
}
}
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 0f935ac8a..a79af2373 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -638,24 +638,32 @@ Object* Execution::DebugBreakHelper() {
bool debug_command_only =
StackGuard::IsDebugCommand() && !StackGuard::IsDebugBreak();
- // Clear the debug request flags.
+ // Clear the debug break request flag.
StackGuard::Continue(DEBUGBREAK);
+
+ ProcessDebugMesssages(debug_command_only);
+
+ // Return to continue execution.
+ return Heap::undefined_value();
+}
+
+void Execution::ProcessDebugMesssages(bool debug_command_only) {
+ // Clear the debug command request flag.
StackGuard::Continue(DEBUGCOMMAND);
HandleScope scope;
// Enter the debugger. Just continue if we fail to enter the debugger.
EnterDebugger debugger;
if (debugger.FailedToEnter()) {
- return Heap::undefined_value();
+ return;
}
// Notify the debug event listeners. Indicate auto continue if the break was
// a debug command break.
Debugger::OnDebugBreak(Factory::undefined_value(), debug_command_only);
-
- // Return to continue execution.
- return Heap::undefined_value();
}
+
+
#endif
Object* Execution::HandleStackGuardInterrupt() {
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 52198c420..10683d69e 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -122,6 +122,7 @@ class Execution : public AllStatic {
Handle<Object> is_global);
#ifdef ENABLE_DEBUGGER_SUPPORT
static Object* DebugBreakHelper();
+ static void ProcessDebugMesssages(bool debug_command_only);
#endif
// If the stack guard is triggered, but it is not an actual
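
Together with the debug.cc changes above, the provide_locker path lets debug
commands be dispatched even while the embedder's own thread is blocked outside
V8: the helper thread takes a v8::Locker and then invokes the registered
handler. A hedged embedder-side sketch (the handler name is illustrative, and
it assumes this revision's v8-debug.h exposes the matching
SetDebugMessageDispatchHandler(handler, provide_locker) and
ProcessDebugMessages() wrappers):

#include <v8.h>
#include <v8-debug.h>

// Illustrative handler: invoked whenever a debug command is queued. With
// provide_locker == true it runs on the MessageDispatchHelperThread, which
// already holds a v8::Locker, so it is safe to re-enter V8 from here.
static void OnDebugMessage() {
  v8::Debug::ProcessDebugMessages();  // drain the queued debug commands
}

void InstallDispatchHandler() {
  // true => start the helper thread; the handler then fires under a Locker
  // even when the embedder's main thread is busy outside of V8.
  v8::Debug::SetDebugMessageDispatchHandler(OnDebugMessage, true);
}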
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 2a80953eb..8d2074964 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -718,6 +718,11 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name,
}
+Handle<Object> Factory::ToObject(Handle<Object> object) {
+ CALL_HEAP_FUNCTION(object->ToObject(), Object);
+}
+
+
Handle<Object> Factory::ToObject(Handle<Object> object,
Handle<Context> global_context) {
CALL_HEAP_FUNCTION(object->ToObject(*global_context), Object);
@@ -766,6 +771,8 @@ Handle<JSObject> Factory::NewArgumentsObject(Handle<Object> callee,
Handle<JSFunction> Factory::CreateApiFunction(
Handle<FunctionTemplateInfo> obj, ApiInstanceType instance_type) {
Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::HandleApiCall));
+ Handle<Code> construct_stub =
+ Handle<Code>(Builtins::builtin(Builtins::JSConstructStubApi));
int internal_field_count = 0;
if (!obj->instance_template()->IsUndefined()) {
@@ -840,6 +847,7 @@ Handle<JSFunction> Factory::CreateApiFunction(
}
result->shared()->set_function_data(*obj);
+ result->shared()->set_construct_stub(*construct_stub);
result->shared()->DontAdaptArguments();
// Recursively copy parent templates' accessors, 'data' may be modified.
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index fd277f20d..2a347cd6f 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -229,6 +229,7 @@ class Factory : public AllStatic {
static Handle<Code> CopyCode(Handle<Code> code);
+ static Handle<Object> ToObject(Handle<Object> object);
static Handle<Object> ToObject(Handle<Object> object,
Handle<Context> global_context);
diff --git a/deps/v8/src/fast-codegen.cc b/deps/v8/src/fast-codegen.cc
index 455dd5fab..4e6f259c6 100644
--- a/deps/v8/src/fast-codegen.cc
+++ b/deps/v8/src/fast-codegen.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,431 +28,410 @@
#include "v8.h"
#include "codegen-inl.h"
-#include "compiler.h"
+#include "data-flow.h"
#include "fast-codegen.h"
-#include "stub-cache.h"
-#include "debug.h"
+#include "scopes.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm())
-
-Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
- Handle<Script> script,
- bool is_eval) {
- CodeGenerator::MakeCodePrologue(fun);
- const int kInitialBufferSize = 4 * KB;
- MacroAssembler masm(NULL, kInitialBufferSize);
- FastCodeGenerator cgen(&masm, script, is_eval);
- cgen.Generate(fun);
- if (cgen.HasStackOverflow()) {
- ASSERT(!Top::has_pending_exception());
- return Handle<Code>::null();
+#define BAILOUT(reason) \
+ do { \
+ if (FLAG_trace_bailout) { \
+ PrintF("%s\n", reason); \
+ } \
+ has_supported_syntax_ = false; \
+ return; \
+ } while (false)
+
+
+#define CHECK_BAILOUT \
+ do { \
+ if (!has_supported_syntax_) return; \
+ } while (false)
+
+
+void FastCodeGenSyntaxChecker::Check(FunctionLiteral* fun,
+ CompilationInfo* info) {
+ info_ = info;
+
+ // We do not specialize if we do not have a receiver or if it is not a
+ // JS object with fast mode properties.
+ if (!info->has_receiver()) BAILOUT("No receiver");
+ if (!info->receiver()->IsJSObject()) BAILOUT("Receiver is not an object");
+ Handle<JSObject> object = Handle<JSObject>::cast(info->receiver());
+ if (!object->HasFastProperties()) BAILOUT("Receiver is in dictionary mode");
+
+ // We do not support stack or heap slots (both of which require
+ // allocation).
+ Scope* scope = fun->scope();
+ if (scope->num_stack_slots() > 0) {
+ BAILOUT("Function has stack-allocated locals");
}
- Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
- return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script);
+ if (scope->num_heap_slots() > 0) {
+ BAILOUT("Function has context-allocated locals");
+ }
+
+ VisitDeclarations(scope->declarations());
+ CHECK_BAILOUT;
+
+ // We do not support empty function bodies.
+ if (fun->body()->is_empty()) BAILOUT("Function has an empty body");
+ VisitStatements(fun->body());
}
-int FastCodeGenerator::SlotOffset(Slot* slot) {
- ASSERT(slot != NULL);
- // Offset is negative because higher indexes are at lower addresses.
- int offset = -slot->index() * kPointerSize;
- // Adjust by a (parameter or local) base offset.
- switch (slot->type()) {
- case Slot::PARAMETER:
- offset += (function_->scope()->num_parameters() + 1) * kPointerSize;
- break;
- case Slot::LOCAL:
- offset += JavaScriptFrameConstants::kLocal0Offset;
- break;
- case Slot::CONTEXT:
- case Slot::LOOKUP:
- UNREACHABLE();
- }
- return offset;
-}
-
-
-void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- break;
- case Expression::kValue:
- __ push(reg);
- break;
- case Expression::kTest:
- TestAndBranch(reg, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ push(reg);
- TestAndBranch(reg, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ push(reg);
- TestAndBranch(reg, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- }
- }
+void FastCodeGenSyntaxChecker::VisitDeclarations(
+ ZoneList<Declaration*>* decls) {
+ if (!decls->is_empty()) BAILOUT("Function has declarations");
}
-void FastCodeGenerator::VisitDeclarations(
- ZoneList<Declaration*>* declarations) {
- int length = declarations->length();
- int globals = 0;
- for (int i = 0; i < length; i++) {
- Declaration* decl = declarations->at(i);
- Variable* var = decl->proxy()->var();
- Slot* slot = var->slot();
-
- // If it was not possible to allocate the variable at compile
- // time, we need to "declare" it at runtime to make sure it
- // actually exists in the local context.
- if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
- VisitDeclaration(decl);
- } else {
- // Count global variables and functions for later processing
- globals++;
- }
+void FastCodeGenSyntaxChecker::VisitStatements(ZoneList<Statement*>* stmts) {
+ for (int i = 0, len = stmts->length(); i < len; i++) {
+ Visit(stmts->at(i));
+ CHECK_BAILOUT;
}
+}
- // Compute array of global variable and function declarations.
- // Do nothing in case of no declared global functions or variables.
- if (globals > 0) {
- Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
- for (int j = 0, i = 0; i < length; i++) {
- Declaration* decl = declarations->at(i);
- Variable* var = decl->proxy()->var();
- Slot* slot = var->slot();
-
- if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
- array->set(j++, *(var->name()));
- if (decl->fun() == NULL) {
- if (var->mode() == Variable::CONST) {
- // In case this is const property use the hole.
- array->set_the_hole(j++);
- } else {
- array->set_undefined(j++);
- }
- } else {
- Handle<JSFunction> function =
- Compiler::BuildBoilerplate(decl->fun(), script_, this);
- // Check for stack-overflow exception.
- if (HasStackOverflow()) return;
- array->set(j++, *function);
- }
- }
- }
- // Invoke the platform-dependent code generator to do the actual
- // declaration the global variables and functions.
- DeclareGlobals(array);
- }
+
+void FastCodeGenSyntaxChecker::VisitDeclaration(Declaration* decl) {
+ UNREACHABLE();
}
-void FastCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) {
- CodeGenerator::RecordPositions(masm_, fun->start_position());
- }
+void FastCodeGenSyntaxChecker::VisitBlock(Block* stmt) {
+ VisitStatements(stmt->statements());
}
-void FastCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) {
- CodeGenerator::RecordPositions(masm_, fun->end_position());
- }
+void FastCodeGenSyntaxChecker::VisitExpressionStatement(
+ ExpressionStatement* stmt) {
+ Visit(stmt->expression());
}
-void FastCodeGenerator::SetStatementPosition(Statement* stmt) {
- if (FLAG_debug_info) {
- CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
- }
+void FastCodeGenSyntaxChecker::VisitEmptyStatement(EmptyStatement* stmt) {
+ // Supported.
}
-void FastCodeGenerator::SetSourcePosition(int pos) {
- if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
- masm_->RecordPosition(pos);
- }
+void FastCodeGenSyntaxChecker::VisitIfStatement(IfStatement* stmt) {
+ BAILOUT("IfStatement");
}
-void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
-#ifdef DEBUG
- Expression::Context expected = Expression::kUninitialized;
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- case Expression::kTest:
- // The value of the left subexpression is not needed.
- expected = Expression::kTest;
- break;
- case Expression::kValue:
- // The value of the left subexpression is needed and its specific
- // context depends on the operator.
- expected = (expr->op() == Token::OR)
- ? Expression::kValueTest
- : Expression::kTestValue;
- break;
- case Expression::kValueTest:
- // The value of the left subexpression is needed for OR.
- expected = (expr->op() == Token::OR)
- ? Expression::kValueTest
- : Expression::kTest;
- break;
- case Expression::kTestValue:
- // The value of the left subexpression is needed for AND.
- expected = (expr->op() == Token::OR)
- ? Expression::kTest
- : Expression::kTestValue;
- break;
- }
- ASSERT_EQ(expected, expr->left()->context());
- ASSERT_EQ(expr->context(), expr->right()->context());
-#endif
-
- Label eval_right, done;
-
- // Set up the appropriate context for the left subexpression based
- // on the operation and our own context. Initially assume we can
- // inherit both true and false labels from our context.
- Label* if_true = true_label_;
- Label* if_false = false_label_;
- if (expr->op() == Token::OR) {
- // If we are not in some kind of a test context, we did not inherit a
- // true label from our context. Use the end of the expression.
- if (expr->context() == Expression::kEffect ||
- expr->context() == Expression::kValue) {
- if_true = &done;
- }
- // The false label is the label of the right subexpression.
- if_false = &eval_right;
- } else {
- ASSERT_EQ(Token::AND, expr->op());
- // The true label is the label of the right subexpression.
- if_true = &eval_right;
- // If we are not in some kind of a test context, we did not inherit a
- // false label from our context. Use the end of the expression.
- if (expr->context() == Expression::kEffect ||
- expr->context() == Expression::kValue) {
- if_false = &done;
- }
- }
- VisitForControl(expr->left(), if_true, if_false);
+void FastCodeGenSyntaxChecker::VisitContinueStatement(ContinueStatement* stmt) {
+ BAILOUT("ContinueStatement");
+}
- __ bind(&eval_right);
- Visit(expr->right());
- __ bind(&done);
+void FastCodeGenSyntaxChecker::VisitBreakStatement(BreakStatement* stmt) {
+ BAILOUT("BreakStatement");
}
-void FastCodeGenerator::VisitBlock(Block* stmt) {
- Comment cmnt(masm_, "[ Block");
- Breakable nested_statement(this, stmt);
- SetStatementPosition(stmt);
- VisitStatements(stmt->statements());
- __ bind(nested_statement.break_target());
+void FastCodeGenSyntaxChecker::VisitReturnStatement(ReturnStatement* stmt) {
+ BAILOUT("ReturnStatement");
}
-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- Comment cmnt(masm_, "[ ExpressionStatement");
- SetStatementPosition(stmt);
- Visit(stmt->expression());
+void FastCodeGenSyntaxChecker::VisitWithEnterStatement(
+ WithEnterStatement* stmt) {
+ BAILOUT("WithEnterStatement");
}
-void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
- Comment cmnt(masm_, "[ EmptyStatement");
- SetStatementPosition(stmt);
+void FastCodeGenSyntaxChecker::VisitWithExitStatement(WithExitStatement* stmt) {
+ BAILOUT("WithExitStatement");
}
-void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
- Comment cmnt(masm_, "[ IfStatement");
- SetStatementPosition(stmt);
- Label then_part, else_part, done;
+void FastCodeGenSyntaxChecker::VisitSwitchStatement(SwitchStatement* stmt) {
+ BAILOUT("SwitchStatement");
+}
- // Do not worry about optimizing for empty then or else bodies.
- VisitForControl(stmt->condition(), &then_part, &else_part);
- __ bind(&then_part);
- Visit(stmt->then_statement());
- __ jmp(&done);
+void FastCodeGenSyntaxChecker::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ BAILOUT("DoWhileStatement");
+}
- __ bind(&else_part);
- Visit(stmt->else_statement());
- __ bind(&done);
+void FastCodeGenSyntaxChecker::VisitWhileStatement(WhileStatement* stmt) {
+ BAILOUT("WhileStatement");
}
-void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
- Comment cmnt(masm_, "[ ContinueStatement");
- SetStatementPosition(stmt);
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- while (!current->IsContinueTarget(stmt->target())) {
- stack_depth = current->Exit(stack_depth);
- current = current->outer();
- }
- __ Drop(stack_depth);
+void FastCodeGenSyntaxChecker::VisitForStatement(ForStatement* stmt) {
+ BAILOUT("ForStatement");
+}
+
- Iteration* loop = current->AsIteration();
- __ jmp(loop->continue_target());
+void FastCodeGenSyntaxChecker::VisitForInStatement(ForInStatement* stmt) {
+ BAILOUT("ForInStatement");
}
-void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
- Comment cmnt(masm_, "[ BreakStatement");
- SetStatementPosition(stmt);
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- while (!current->IsBreakTarget(stmt->target())) {
- stack_depth = current->Exit(stack_depth);
- current = current->outer();
- }
- __ Drop(stack_depth);
+void FastCodeGenSyntaxChecker::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ BAILOUT("TryCatchStatement");
+}
+
- Breakable* target = current->AsBreakable();
- __ jmp(target->break_target());
+void FastCodeGenSyntaxChecker::VisitTryFinallyStatement(
+ TryFinallyStatement* stmt) {
+ BAILOUT("TryFinallyStatement");
}
-void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- Comment cmnt(masm_, "[ ReturnStatement");
- SetStatementPosition(stmt);
- Expression* expr = stmt->expression();
- // Complete the statement based on the type of the subexpression.
- if (expr->AsLiteral() != NULL) {
- __ Move(result_register(), expr->AsLiteral()->handle());
- } else {
- ASSERT_EQ(Expression::kValue, expr->context());
- Visit(expr);
- __ pop(result_register());
- }
+void FastCodeGenSyntaxChecker::VisitDebuggerStatement(
+ DebuggerStatement* stmt) {
+ BAILOUT("DebuggerStatement");
+}
- // Exit all nested statements.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- while (current != NULL) {
- stack_depth = current->Exit(stack_depth);
- current = current->outer();
- }
- __ Drop(stack_depth);
- EmitReturnSequence(stmt->statement_pos());
+void FastCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
+ BAILOUT("FunctionLiteral");
}
+void FastCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* expr) {
+ BAILOUT("FunctionBoilerplateLiteral");
+}
-void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
- Comment cmnt(masm_, "[ WithEnterStatement");
- SetStatementPosition(stmt);
+void FastCodeGenSyntaxChecker::VisitConditional(Conditional* expr) {
+ BAILOUT("Conditional");
+}
- Visit(stmt->expression());
- if (stmt->is_catch_block()) {
- __ CallRuntime(Runtime::kPushCatchContext, 1);
+
+void FastCodeGenSyntaxChecker::VisitSlot(Slot* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) {
+ // Only global variable references are supported.
+ Variable* var = expr->var();
+ if (!var->is_global()) BAILOUT("Non-global variable");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitLiteral(Literal* expr) {
+ BAILOUT("Literal");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitRegExpLiteral(RegExpLiteral* expr) {
+ BAILOUT("RegExpLiteral");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitObjectLiteral(ObjectLiteral* expr) {
+ BAILOUT("ObjectLiteral");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitArrayLiteral(ArrayLiteral* expr) {
+ BAILOUT("ArrayLiteral");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitCatchExtensionObject(
+ CatchExtensionObject* expr) {
+ BAILOUT("CatchExtensionObject");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) {
+ // Simple assignments to (named) this properties are supported.
+ if (expr->op() != Token::ASSIGN) BAILOUT("Non-simple assignment");
+
+ Property* prop = expr->target()->AsProperty();
+ if (prop == NULL) BAILOUT("Non-property assignment");
+ VariableProxy* proxy = prop->obj()->AsVariableProxy();
+ if (proxy == NULL || !proxy->var()->is_this()) {
+ BAILOUT("Non-this-property assignment");
+ }
+ if (!prop->key()->IsPropertyName()) {
+ BAILOUT("Non-named-property assignment");
+ }
+
+ // We will only specialize for fields on the object itself.
+ // Expression::IsPropertyName implies that the name is a literal
+ // symbol but we do not assume that.
+ Literal* key = prop->key()->AsLiteral();
+ if (key != NULL && key->handle()->IsString()) {
+ Handle<Object> receiver = info()->receiver();
+ Handle<String> name = Handle<String>::cast(key->handle());
+ LookupResult lookup;
+ receiver->Lookup(*name, &lookup);
+ if (lookup.holder() != *receiver) BAILOUT("Non-own property assignment");
+ if (lookup.type() != FIELD) BAILOUT("Non-field property assignment");
} else {
- __ CallRuntime(Runtime::kPushContext, 1);
+ UNREACHABLE();
+ BAILOUT("Unexpected non-string-literal property key");
}
- // Both runtime calls return the new context in both the context and the
- // result registers.
- // Update local stack frame context field.
- StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
+ Visit(expr->value());
}
-void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
- Comment cmnt(masm_, "[ WithExitStatement");
- SetStatementPosition(stmt);
+void FastCodeGenSyntaxChecker::VisitThrow(Throw* expr) {
+ BAILOUT("Throw");
+}
- // Pop context.
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- // Update local stack frame context field.
- StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
+
+void FastCodeGenSyntaxChecker::VisitProperty(Property* expr) {
+ BAILOUT("Property");
}
-void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+void FastCodeGenSyntaxChecker::VisitCall(Call* expr) {
+ BAILOUT("Call");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitCallNew(CallNew* expr) {
+ BAILOUT("CallNew");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitCallRuntime(CallRuntime* expr) {
+ BAILOUT("CallRuntime");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitUnaryOperation(UnaryOperation* expr) {
+ BAILOUT("UnaryOperation");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitCountOperation(CountOperation* expr) {
+ BAILOUT("CountOperation");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) {
+ BAILOUT("BinaryOperation");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitCompareOperation(CompareOperation* expr) {
+ BAILOUT("CompareOperation");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
+ BAILOUT("ThisFunction");
+}
+
+#undef BAILOUT
+#undef CHECK_BAILOUT
+
+
+#define __ ACCESS_MASM(masm())
+
+Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
+ Handle<Script> script,
+ bool is_eval,
+ CompilationInfo* info) {
+ // Label the AST before calling MakeCodePrologue, so AST node numbers are
+ // printed with the AST.
+ AstLabeler labeler;
+ labeler.Label(fun);
+ info->set_has_this_properties(labeler.has_this_properties());
+
+ CodeGenerator::MakeCodePrologue(fun);
+
+ const int kInitialBufferSize = 4 * KB;
+ MacroAssembler masm(NULL, kInitialBufferSize);
+
+ // Generate the fast-path code.
+ FastCodeGenerator fast_cgen(&masm, script, is_eval);
+ fast_cgen.Generate(fun, info);
+ if (fast_cgen.HasStackOverflow()) {
+ ASSERT(!Top::has_pending_exception());
+ return Handle<Code>::null();
+ }
+
+ // Generate the full code for the function in bailout mode, using the same
+ // macro assembler.
+ CodeGenerator cgen(&masm, script, is_eval);
+ CodeGeneratorScope scope(&cgen);
+ cgen.Generate(fun, CodeGenerator::SECONDARY, info);
+ if (cgen.HasStackOverflow()) {
+ ASSERT(!Top::has_pending_exception());
+ return Handle<Code>::null();
+ }
+
+ Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
+ return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script);
+}
+
+
+void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
UNREACHABLE();
}
-void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
- Comment cmnt(masm_, "[ DoWhileStatement");
- SetStatementPosition(stmt);
- Label body, stack_limit_hit, stack_check_success;
+void FastCodeGenerator::VisitBlock(Block* stmt) {
+ VisitStatements(stmt->statements());
+}
- Iteration loop_statement(this, stmt);
- increment_loop_depth();
- __ bind(&body);
- Visit(stmt->body());
+void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+ Visit(stmt->expression());
+}
- // Check stack before looping.
- __ StackLimitCheck(&stack_limit_hit);
- __ bind(&stack_check_success);
- __ bind(loop_statement.continue_target());
- VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
+ // Nothing to do.
+}
- __ bind(&stack_limit_hit);
- StackCheckStub stack_stub;
- __ CallStub(&stack_stub);
- __ jmp(&stack_check_success);
- __ bind(loop_statement.break_target());
+void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
+ UNREACHABLE();
+}
+
- decrement_loop_depth();
+void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
+ UNREACHABLE();
}
-void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
- Comment cmnt(masm_, "[ WhileStatement");
- SetStatementPosition(stmt);
- Label body, stack_limit_hit, stack_check_success;
+void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
+ UNREACHABLE();
+}
+
- Iteration loop_statement(this, stmt);
- increment_loop_depth();
+void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+ UNREACHABLE();
+}
- // Emit the test at the bottom of the loop.
- __ jmp(loop_statement.continue_target());
- __ bind(&body);
- Visit(stmt->body());
+void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
+ UNREACHABLE();
+}
- __ bind(loop_statement.continue_target());
- // Check stack before looping.
- __ StackLimitCheck(&stack_limit_hit);
- __ bind(&stack_check_success);
- VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
+ UNREACHABLE();
+}
- __ bind(&stack_limit_hit);
- StackCheckStub stack_stub;
- __ CallStub(&stack_stub);
- __ jmp(&stack_check_success);
- __ bind(loop_statement.break_target());
- decrement_loop_depth();
+void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
+ UNREACHABLE();
}
@@ -467,120 +446,22 @@ void FastCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
- Comment cmnt(masm_, "[ TryCatchStatement");
- SetStatementPosition(stmt);
- // The try block adds a handler to the exception handler chain
- // before entering, and removes it again when exiting normally.
- // If an exception is thrown during execution of the try block,
- // control is passed to the handler, which also consumes the handler.
- // At this point, the exception is in a register, and store it in
- // the temporary local variable (prints as ".catch-var") before
- // executing the catch block. The catch block has been rewritten
- // to introduce a new scope to bind the catch variable and to remove
- // that scope again afterwards.
-
- Label try_handler_setup, catch_entry, done;
-
- __ Call(&try_handler_setup);
- // Try handler code, exception in result register.
-
- // Store exception in local .catch variable before executing catch block.
- {
- // The catch variable is *always* a variable proxy for a local variable.
- Variable* catch_var = stmt->catch_var()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(catch_var);
- Slot* variable_slot = catch_var->slot();
- ASSERT_NOT_NULL(variable_slot);
- ASSERT_EQ(Slot::LOCAL, variable_slot->type());
- StoreToFrameField(SlotOffset(variable_slot), result_register());
- }
-
- Visit(stmt->catch_block());
- __ jmp(&done);
-
- // Try block code. Sets up the exception handler chain.
- __ bind(&try_handler_setup);
- {
- TryCatch try_block(this, &catch_entry);
- __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
- Visit(stmt->try_block());
- __ PopTryHandler();
- }
- __ bind(&done);
+ UNREACHABLE();
}
void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- Comment cmnt(masm_, "[ TryFinallyStatement");
- SetStatementPosition(stmt);
- // Try finally is compiled by setting up a try-handler on the stack while
- // executing the try body, and removing it again afterwards.
- //
- // The try-finally construct can enter the finally block in three ways:
- // 1. By exiting the try-block normally. This removes the try-handler and
- // calls the finally block code before continuing.
- // 2. By exiting the try-block with a function-local control flow transfer
- // (break/continue/return). The site of the, e.g., break removes the
- // try handler and calls the finally block code before continuing
- // its outward control transfer.
- // 3. by exiting the try-block with a thrown exception.
- // This can happen in nested function calls. It traverses the try-handler
- // chain and consumes the try-handler entry before jumping to the
- // handler code. The handler code then calls the finally-block before
- // rethrowing the exception.
- //
- // The finally block must assume a return address on top of the stack
- // (or in the link register on ARM chips) and a value (return value or
- // exception) in the result register (rax/eax/r0), both of which must
- // be preserved. The return address isn't GC-safe, so it should be
- // cooked before GC.
- Label finally_entry;
- Label try_handler_setup;
-
- // Setup the try-handler chain. Use a call to
- // Jump to try-handler setup and try-block code. Use call to put try-handler
- // address on stack.
- __ Call(&try_handler_setup);
- // Try handler code. Return address of call is pushed on handler stack.
- {
- // This code is only executed during stack-handler traversal when an
- // exception is thrown. The execption is in the result register, which
- // is retained by the finally block.
- // Call the finally block and then rethrow the exception.
- __ Call(&finally_entry);
- __ push(result_register());
- __ CallRuntime(Runtime::kReThrow, 1);
- }
+ UNREACHABLE();
+}
- __ bind(&finally_entry);
- {
- // Finally block implementation.
- Finally finally_block(this);
- EnterFinallyBlock();
- Visit(stmt->finally_block());
- ExitFinallyBlock(); // Return to the calling code.
- }
- __ bind(&try_handler_setup);
- {
- // Setup try handler (stack pointer registers).
- TryFinally try_block(this, &finally_entry);
- __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
- Visit(stmt->try_block());
- __ PopTryHandler();
- }
- // Execute the finally block on the way out.
- __ Call(&finally_entry);
+void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
+ UNREACHABLE();
}
-void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Comment cmnt(masm_, "[ DebuggerStatement");
- SetStatementPosition(stmt);
- __ CallRuntime(Runtime::kDebugBreak, 0);
- // Ignore the return value.
-#endif
+void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+ UNREACHABLE();
}
@@ -591,169 +472,123 @@ void FastCodeGenerator::VisitFunctionBoilerplateLiteral(
void FastCodeGenerator::VisitConditional(Conditional* expr) {
- Comment cmnt(masm_, "[ Conditional");
- ASSERT_EQ(Expression::kTest, expr->condition()->context());
- ASSERT_EQ(expr->context(), expr->then_expression()->context());
- ASSERT_EQ(expr->context(), expr->else_expression()->context());
+ UNREACHABLE();
+}
- Label true_case, false_case, done;
- VisitForControl(expr->condition(), &true_case, &false_case);
+void FastCodeGenerator::VisitSlot(Slot* expr) {
+ UNREACHABLE();
+}
- __ bind(&true_case);
- Visit(expr->then_expression());
- // If control flow falls through Visit, jump to done.
- if (expr->context() == Expression::kEffect ||
- expr->context() == Expression::kValue) {
- __ jmp(&done);
- }
- __ bind(&false_case);
- Visit(expr->else_expression());
- // If control flow falls through Visit, merge it with true case here.
- if (expr->context() == Expression::kEffect ||
- expr->context() == Expression::kValue) {
- __ bind(&done);
+void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ ASSERT(expr->var()->is_global() && !expr->var()->is_this());
+ Comment cmnt(masm(), ";; Global");
+ if (FLAG_print_ir) {
+ SmartPointer<char> name = expr->name()->ToCString();
+ PrintF("%d: t%d = Global(%s)\n", expr->num(), expr->num(), *name);
}
+ EmitGlobalVariableLoad(expr->name());
}
-void FastCodeGenerator::VisitSlot(Slot* expr) {
- // Slots do not appear directly in the AST.
+void FastCodeGenerator::VisitLiteral(Literal* expr) {
UNREACHABLE();
}
-void FastCodeGenerator::VisitLiteral(Literal* expr) {
- Comment cmnt(masm_, "[ Literal");
- Apply(expr->context(), expr);
+void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ UNREACHABLE();
}
-void FastCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+ UNREACHABLE();
+}
+
- // Record source code position of the (possible) IC call.
- SetSourcePosition(expr->position());
+void FastCodeGenerator::VisitAssignment(Assignment* expr) {
+ // Known to be a simple this property assignment.
+ Visit(expr->value());
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
Property* prop = expr->target()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type = (prop->key()->context() == Expression::kUninitialized)
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
+ ASSERT_NOT_NULL(prop);
+ ASSERT_NOT_NULL(prop->obj()->AsVariableProxy());
+ ASSERT(prop->obj()->AsVariableProxy()->var()->is_this());
+ ASSERT(prop->key()->IsPropertyName());
+ Handle<String> name =
+ Handle<String>::cast(prop->key()->AsLiteral()->handle());
+
+ Comment cmnt(masm(), ";; Store(this)");
+ if (FLAG_print_ir) {
+ SmartPointer<char> name_string = name->ToCString();
+ PrintF("%d: t%d = Store(this, \"%s\", t%d)\n",
+ expr->num(), expr->num(), *name_string, expr->value()->num());
}
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- break;
- case KEYED_PROPERTY:
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- Visit(prop->key());
- ASSERT_EQ(Expression::kValue, prop->key()->context());
- break;
- }
+ EmitThisPropertyStore(name);
+}
- // If we have a compound assignment: Get value of LHS expression and
- // store in on top of the stack.
- // Note: Relies on kValue context being 'stack'.
- if (expr->is_compound()) {
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
- Expression::kValue);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(prop, Expression::kValue);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(prop, Expression::kValue);
- break;
- }
- }
- // Evaluate RHS expression.
- Expression* rhs = expr->value();
- ASSERT_EQ(Expression::kValue, rhs->context());
- Visit(rhs);
+void FastCodeGenerator::VisitThrow(Throw* expr) {
+ UNREACHABLE();
+}
- // If we have a compount assignment: Apply operator.
- if (expr->is_compound()) {
- EmitCompoundAssignmentOp(expr->binary_op(), Expression::kValue);
- }
- // Store the value.
- switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->context());
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyAssignment(expr);
- break;
- }
+void FastCodeGenerator::VisitProperty(Property* expr) {
+ UNREACHABLE();
}
-void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- // Call runtime routine to allocate the catch extension object and
- // assign the exception value to the catch variable.
- Comment cmnt(masm_, "[ CatchExtensionObject");
-
- // Push key string.
- ASSERT_EQ(Expression::kValue, expr->key()->context());
- Visit(expr->key());
- ASSERT_EQ(Expression::kValue, expr->value()->context());
- Visit(expr->value());
+void FastCodeGenerator::VisitCall(Call* expr) {
+ UNREACHABLE();
+}
- // Create catch extension object.
- __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- __ push(result_register());
+void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+ UNREACHABLE();
}
-void FastCodeGenerator::VisitThrow(Throw* expr) {
- Comment cmnt(masm_, "[ Throw");
- Visit(expr->exception());
- // Exception is on stack.
- __ CallRuntime(Runtime::kThrow, 1);
- // Never returns here.
+void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ UNREACHABLE();
}
-int FastCodeGenerator::TryFinally::Exit(int stack_depth) {
- // The macros used here must preserve the result register.
- __ Drop(stack_depth);
- __ PopTryHandler();
- __ Call(finally_entry_);
- return 0;
+void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ UNREACHABLE();
}
-int FastCodeGenerator::TryCatch::Exit(int stack_depth) {
- // The macros used here must preserve the result register.
- __ Drop(stack_depth);
- __ PopTryHandler();
- return 0;
+void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+ UNREACHABLE();
}
+void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ UNREACHABLE();
+}
+
#undef __
diff --git a/deps/v8/src/fast-codegen.h b/deps/v8/src/fast-codegen.h
index ecac8e7fa..b40f6fb7f 100644
--- a/deps/v8/src/fast-codegen.h
+++ b/deps/v8/src/fast-codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,316 +31,96 @@
#include "v8.h"
#include "ast.h"
+#include "compiler.h"
namespace v8 {
namespace internal {
-// -----------------------------------------------------------------------------
-// Fast code generator.
-
-class FastCodeGenerator: public AstVisitor {
+class FastCodeGenSyntaxChecker: public AstVisitor {
public:
- FastCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
- : masm_(masm),
- function_(NULL),
- script_(script),
- is_eval_(is_eval),
- nesting_stack_(NULL),
- loop_depth_(0),
- true_label_(NULL),
- false_label_(NULL) {
+ explicit FastCodeGenSyntaxChecker()
+ : info_(NULL), has_supported_syntax_(true) {
}
- static Handle<Code> MakeCode(FunctionLiteral* fun,
- Handle<Script> script,
- bool is_eval);
+ void Check(FunctionLiteral* fun, CompilationInfo* info);
- void Generate(FunctionLiteral* fun);
+ CompilationInfo* info() { return info_; }
+ bool has_supported_syntax() { return has_supported_syntax_; }
private:
- class Breakable;
- class Iteration;
- class TryCatch;
- class TryFinally;
- class Finally;
- class ForIn;
-
- class NestedStatement BASE_EMBEDDED {
- public:
- explicit NestedStatement(FastCodeGenerator* codegen) : codegen_(codegen) {
- // Link into codegen's nesting stack.
- previous_ = codegen->nesting_stack_;
- codegen->nesting_stack_ = this;
- }
- virtual ~NestedStatement() {
- // Unlink from codegen's nesting stack.
- ASSERT_EQ(this, codegen_->nesting_stack_);
- codegen_->nesting_stack_ = previous_;
- }
-
- virtual Breakable* AsBreakable() { return NULL; }
- virtual Iteration* AsIteration() { return NULL; }
- virtual TryCatch* AsTryCatch() { return NULL; }
- virtual TryFinally* AsTryFinally() { return NULL; }
- virtual Finally* AsFinally() { return NULL; }
- virtual ForIn* AsForIn() { return NULL; }
-
- virtual bool IsContinueTarget(Statement* target) { return false; }
- virtual bool IsBreakTarget(Statement* target) { return false; }
-
- // Generate code to leave the nested statement. This includes
- // cleaning up any stack elements in use and restoring the
- // stack to the expectations of the surrounding statements.
- // Takes a number of stack elements currently on top of the
- // nested statement's stack, and returns a number of stack
- // elements left on top of the surrounding statement's stack.
- // The generated code must preserve the result register (which
- // contains the value in case of a return).
- virtual int Exit(int stack_depth) {
- // Default implementation for the case where there is
- // nothing to clean up.
- return stack_depth;
- }
- NestedStatement* outer() { return previous_; }
- protected:
- MacroAssembler* masm() { return codegen_->masm(); }
- private:
- FastCodeGenerator* codegen_;
- NestedStatement* previous_;
- DISALLOW_COPY_AND_ASSIGN(NestedStatement);
- };
-
- class Breakable : public NestedStatement {
- public:
- Breakable(FastCodeGenerator* codegen,
- BreakableStatement* break_target)
- : NestedStatement(codegen),
- target_(break_target) {}
- virtual ~Breakable() {}
- virtual Breakable* AsBreakable() { return this; }
- virtual bool IsBreakTarget(Statement* statement) {
- return target_ == statement;
- }
- BreakableStatement* statement() { return target_; }
- Label* break_target() { return &break_target_label_; }
- private:
- BreakableStatement* target_;
- Label break_target_label_;
- DISALLOW_COPY_AND_ASSIGN(Breakable);
- };
-
- class Iteration : public Breakable {
- public:
- Iteration(FastCodeGenerator* codegen,
- IterationStatement* iteration_statement)
- : Breakable(codegen, iteration_statement) {}
- virtual ~Iteration() {}
- virtual Iteration* AsIteration() { return this; }
- virtual bool IsContinueTarget(Statement* statement) {
- return this->statement() == statement;
- }
- Label* continue_target() { return &continue_target_label_; }
- private:
- Label continue_target_label_;
- DISALLOW_COPY_AND_ASSIGN(Iteration);
- };
+ void VisitDeclarations(ZoneList<Declaration*>* decls);
+ void VisitStatements(ZoneList<Statement*>* stmts);
- // The environment inside the try block of a try/catch statement.
- class TryCatch : public NestedStatement {
- public:
- explicit TryCatch(FastCodeGenerator* codegen, Label* catch_entry)
- : NestedStatement(codegen), catch_entry_(catch_entry) { }
- virtual ~TryCatch() {}
- virtual TryCatch* AsTryCatch() { return this; }
- Label* catch_entry() { return catch_entry_; }
- virtual int Exit(int stack_depth);
- private:
- Label* catch_entry_;
- DISALLOW_COPY_AND_ASSIGN(TryCatch);
- };
-
- // The environment inside the try block of a try/finally statement.
- class TryFinally : public NestedStatement {
- public:
- explicit TryFinally(FastCodeGenerator* codegen, Label* finally_entry)
- : NestedStatement(codegen), finally_entry_(finally_entry) { }
- virtual ~TryFinally() {}
- virtual TryFinally* AsTryFinally() { return this; }
- Label* finally_entry() { return finally_entry_; }
- virtual int Exit(int stack_depth);
- private:
- Label* finally_entry_;
- DISALLOW_COPY_AND_ASSIGN(TryFinally);
- };
-
- // A FinallyEnvironment represents being inside a finally block.
- // Abnormal termination of the finally block needs to clean up
- // the block's parameters from the stack.
- class Finally : public NestedStatement {
- public:
- explicit Finally(FastCodeGenerator* codegen) : NestedStatement(codegen) { }
- virtual ~Finally() {}
- virtual Finally* AsFinally() { return this; }
- virtual int Exit(int stack_depth) {
- return stack_depth + kFinallyStackElementCount;
- }
- private:
- // Number of extra stack slots occupied during a finally block.
- static const int kFinallyStackElementCount = 2;
- DISALLOW_COPY_AND_ASSIGN(Finally);
- };
-
- // A ForInEnvironment represents being inside a for-in loop.
- // Abnormal termination of the for-in block needs to clean up
- // the block's temporary storage from the stack.
- class ForIn : public Iteration {
- public:
- ForIn(FastCodeGenerator* codegen,
- ForInStatement* statement)
- : Iteration(codegen, statement) { }
- virtual ~ForIn() {}
- virtual ForIn* AsForIn() { return this; }
- virtual int Exit(int stack_depth) {
- return stack_depth + kForInStackElementCount;
- }
- private:
- // TODO(lrn): Check that this value is correct when implementing
- // for-in.
- static const int kForInStackElementCount = 5;
- DISALLOW_COPY_AND_ASSIGN(ForIn);
- };
-
-
- int SlotOffset(Slot* slot);
-
- // Emit code to complete the evaluation of an expression based on its
- // expression context and given its value is in a register, non-lookup
- // slot, or a literal.
- void Apply(Expression::Context context, Register reg);
- void Apply(Expression::Context context, Slot* slot, Register scratch);
- void Apply(Expression::Context context, Literal* lit);
-
- // Emit code to complete the evaluation of an expression based on its
- // expression context and given its value is on top of the stack.
- void ApplyTOS(Expression::Context context);
-
- // Emit code to discard count elements from the top of stack, then
- // complete the evaluation of an expression based on its expression
- // context and given its value is in a register.
- void DropAndApply(int count, Expression::Context context, Register reg);
+ // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
- void Move(Slot* dst, Register source, Register scratch1, Register scratch2);
- void Move(Register dst, Slot* source);
+ CompilationInfo* info_;
+ bool has_supported_syntax_;
- // Return an operand used to read/write to a known (ie, non-LOOKUP) slot.
- // May emit code to traverse the context chain, destroying the scratch
- // register.
- MemOperand EmitSlotSearch(Slot* slot, Register scratch);
+ DISALLOW_COPY_AND_ASSIGN(FastCodeGenSyntaxChecker);
+};
- // Test the JavaScript value in source as if in a test context, compile
- // control flow to a pair of labels.
- void TestAndBranch(Register source, Label* true_label, Label* false_label);
- void VisitForControl(Expression* expr, Label* if_true, Label* if_false) {
- ASSERT(expr->context() == Expression::kTest ||
- expr->context() == Expression::kValueTest ||
- expr->context() == Expression::kTestValue);
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
- true_label_ = if_true;
- false_label_ = if_false;
- Visit(expr);
- true_label_ = saved_true;
- false_label_ = saved_false;
+class FastCodeGenerator: public AstVisitor {
+ public:
+ FastCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
+ : masm_(masm),
+ script_(script),
+ is_eval_(is_eval),
+ function_(NULL),
+ info_(NULL) {
}
- void VisitDeclarations(ZoneList<Declaration*>* declarations);
- void DeclareGlobals(Handle<FixedArray> pairs);
-
- // Platform-specific return sequence
- void EmitReturnSequence(int position);
-
- // Platform-specific code sequences for calls
- void EmitCallWithStub(Call* expr);
- void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
-
- // Platform-specific code for loading variables.
- void EmitVariableLoad(Variable* expr, Expression::Context context);
-
- // Platform-specific support for compiling assignments.
-
- // Load a value from a named property.
- // The receiver is left on the stack by the IC.
- void EmitNamedPropertyLoad(Property* expr, Expression::Context context);
-
- // Load a value from a keyed property.
- // The receiver and the key is left on the stack by the IC.
- void EmitKeyedPropertyLoad(Property* expr, Expression::Context context);
-
- // Apply the compound assignment operator. Expects both operands on top
- // of the stack.
- void EmitCompoundAssignmentOp(Token::Value op, Expression::Context context);
-
- // Complete a variable assignment. The right-hand-side value is expected
- // on top of the stack.
- void EmitVariableAssignment(Variable* var, Expression::Context context);
-
- // Complete a named property assignment. The receiver and right-hand-side
- // value are expected on top of the stack.
- void EmitNamedPropertyAssignment(Assignment* expr);
-
- // Complete a keyed property assignment. The reciever, key, and
- // right-hand-side value are expected on top of the stack.
- void EmitKeyedPropertyAssignment(Assignment* expr);
-
- void SetFunctionPosition(FunctionLiteral* fun);
- void SetReturnPosition(FunctionLiteral* fun);
- void SetStatementPosition(Statement* stmt);
- void SetSourcePosition(int pos);
-
- // Non-local control flow support.
- void EnterFinallyBlock();
- void ExitFinallyBlock();
+ static Handle<Code> MakeCode(FunctionLiteral* fun,
+ Handle<Script> script,
+ bool is_eval,
+ CompilationInfo* info);
- // Loop nesting counter.
- int loop_depth() { return loop_depth_; }
- void increment_loop_depth() { loop_depth_++; }
- void decrement_loop_depth() {
- ASSERT(loop_depth_ > 0);
- loop_depth_--;
- }
+ void Generate(FunctionLiteral* fun, CompilationInfo* info);
+ private:
MacroAssembler* masm() { return masm_; }
- static Register result_register();
- static Register context_register();
+ FunctionLiteral* function() { return function_; }
+ Label* bailout() { return &bailout_; }
- // Set fields in the stack frame. Offsets are the frame pointer relative
- // offsets defined in, e.g., StandardFrameConstants.
- void StoreToFrameField(int frame_offset, Register value);
-
- // Load a value from the current context. Indices are defined as an enum
- // in v8::internal::Context.
- void LoadContextField(Register dst, int context_index);
+ bool has_receiver() { return !info_->receiver().is_null(); }
+ Handle<Object> receiver() { return info_->receiver(); }
+ bool has_this_properties() { return info_->has_this_properties(); }
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- // Handles the shortcutted logical binary operations in VisitBinaryOperation.
- void EmitLogicalOperation(BinaryOperation* expr);
+
+ // Emit code to load the receiver from the stack into a given register.
+ void EmitLoadReceiver(Register reg);
+
+ // Emit code to check that the receiver has the same map as the
+ // compile-time receiver. Receiver is expected in {ia32-edx, x64-rdx,
+ // arm-r1}. Emit a branch to the (single) bailout label if the check fails.
+ void EmitReceiverMapCheck();
+
+ // Emit code to load a global variable value into {ia32-eax, x64-rax,
+ // arm-r0}. Register {ia32-edx, x64-rdx, arm-r1} is preserved if it is
+ // holding the receiver and {ia32-ecx, x64-rcx, arm-r2} is always
+ // clobbered.
+ void EmitGlobalVariableLoad(Handle<String> name);
+
+ // Emit a store to an own property of this. The stored value is expected
+ // in {ia32-eax, x64-rax, arm-r0} and the receiver in {ia32-edx, x64-rdx,
+ // arm-r1}. Both are preserved.
+ void EmitThisPropertyStore(Handle<String> name);
MacroAssembler* masm_;
- FunctionLiteral* function_;
Handle<Script> script_;
bool is_eval_;
- Label return_label_;
- NestedStatement* nesting_stack_;
- int loop_depth_;
- Label* true_label_;
- Label* false_label_;
+ FunctionLiteral* function_;
+ CompilationInfo* info_;
- friend class NestedStatement;
+ Label bailout_;
DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
};
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index f912fc524..b57f2cb62 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -143,12 +143,14 @@ DEFINE_bool(debug_info, true, "add debug information to compiled functions")
DEFINE_bool(strict, false, "strict error checking")
DEFINE_int(min_preparse_length, 1024,
"minimum length for automatic enable preparsing")
-DEFINE_bool(fast_compiler, true,
- "use the fast-mode compiler for some top-level code")
-DEFINE_bool(trace_bailout, false,
- "print reasons for failing to use fast compilation")
+DEFINE_bool(full_compiler, true, "enable dedicated backend for run-once code")
+DEFINE_bool(fast_compiler, false, "enable speculative optimizing backend")
+DEFINE_bool(always_full_compiler, false,
+ "try to use the dedicated run-once backend for all code")
DEFINE_bool(always_fast_compiler, false,
- "always try using the fast compiler")
+ "try to use the speculative optimizing backend for all code")
+DEFINE_bool(trace_bailout, false,
+ "print reasons for falling back to using the classic V8 backend")
// compilation-cache.cc
DEFINE_bool(compilation_cache, true, "enable compilation cache")
@@ -201,6 +203,11 @@ DEFINE_bool(canonicalize_object_literal_maps, true,
DEFINE_bool(use_big_map_space, true,
"Use big map space, but don't compact if it grew too big.")
+DEFINE_int(max_map_space_pages, MapSpace::kMaxMapPageIndex - 1,
+ "Maximum number of pages in map space which still allows "
+ "encoding of forwarding pointers. That's actually a constant, but "
+ "it's useful to control it with a flag for better testing.")
+
// mksnapshot.cc
DEFINE_bool(h, false, "print this message")
DEFINE_bool(new_snapshot, true, "use new snapshot implementation")
@@ -294,6 +301,7 @@ DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
// compiler.cc
DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
DEFINE_bool(print_scopes, false, "print scopes")
+DEFINE_bool(print_ir, false, "print the AST as seen by the backend")
// contexts.cc
DEFINE_bool(trace_contexts, false, "trace contexts operations")
@@ -358,6 +366,8 @@ DEFINE_bool(log_code, false,
DEFINE_bool(log_gc, false,
"Log heap samples on garbage collection for the hp2ps tool.")
DEFINE_bool(log_handles, false, "Log global handle events.")
+DEFINE_bool(log_snapshot_positions, false,
+ "log positions of (de)serialized objects in the snapshot.")
DEFINE_bool(log_state_changes, false, "Log state changes.")
DEFINE_bool(log_suspect, false, "Log suspect operations.")
DEFINE_bool(log_producers, false, "Log stack traces of JS objects allocations.")
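
The renamed backend flags compose on the command line; for example, forcing
the dedicated run-once backend for all code while logging why individual
functions fall back to the classic backend (illustrative d8 invocation using
the flag names defined above):

  d8 --always_full_compiler --trace_bailout script.js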
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 2f90a316e..e56a2c83e 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -176,7 +176,7 @@ StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type) {
StackTraceFrameIterator::StackTraceFrameIterator() {
- if (!done() && !frame()->function()->IsJSFunction()) Advance();
+ if (!done() && !IsValidFrame()) Advance();
}
@@ -184,10 +184,18 @@ void StackTraceFrameIterator::Advance() {
while (true) {
JavaScriptFrameIterator::Advance();
if (done()) return;
- if (frame()->function()->IsJSFunction()) return;
+ if (IsValidFrame()) return;
}
}
+bool StackTraceFrameIterator::IsValidFrame() {
+ if (!frame()->function()->IsJSFunction()) return false;
+ Object* script = JSFunction::cast(frame()->function())->shared()->script();
+  // Don't show functions from native scripts to the user.
+ return (script->IsScript() &&
+ Script::TYPE_NATIVE != Script::cast(script)->type()->value());
+}
+
// -------------------------------------------------------------------------
@@ -402,7 +410,7 @@ Object*& ExitFrame::code_slot() const {
Code* ExitFrame::code() const {
Object* code = code_slot();
if (code->IsSmi()) {
- return Heap::c_entry_debug_break_code();
+ return Heap::debugger_statement_code();
} else {
return Code::cast(code);
}
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 024065abf..8cbbc6267 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -589,6 +589,9 @@ class StackTraceFrameIterator: public JavaScriptFrameIterator {
public:
StackTraceFrameIterator();
void Advance();
+
+ private:
+ bool IsValidFrame();
};
@@ -607,11 +610,12 @@ class SafeStackFrameIterator BASE_EMBEDDED {
void Advance();
void Reset();
- private:
static bool IsWithinBounds(
Address low_bound, Address high_bound, Address addr) {
return low_bound <= addr && addr <= high_bound;
}
+
+ private:
bool IsValidStackAddress(Address addr) const {
return IsWithinBounds(low_bound_, high_bound_, addr);
}
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
new file mode 100644
index 000000000..01714cbb1
--- /dev/null
+++ b/deps/v8/src/full-codegen.cc
@@ -0,0 +1,1155 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "compiler.h"
+#include "full-codegen.h"
+#include "stub-cache.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+#define BAILOUT(reason) \
+ do { \
+ if (FLAG_trace_bailout) { \
+ PrintF("%s\n", reason); \
+ } \
+ has_supported_syntax_ = false; \
+ return; \
+ } while (false)
+
+
+#define CHECK_BAILOUT \
+ do { \
+ if (!has_supported_syntax_) return; \
+ } while (false)
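+
+// Both macros expand inside do { ... } while (false) so that each behaves
+// as a single statement and composes safely with unbraced if/else. The
+// checker's visit functions below use them in the pattern:
+//
+//   Visit(subexpression);
+//   CHECK_BAILOUT;  // return early once has_supported_syntax_ is false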
+
+
+void FullCodeGenSyntaxChecker::Check(FunctionLiteral* fun) {
+ Scope* scope = fun->scope();
+ VisitDeclarations(scope->declarations());
+ CHECK_BAILOUT;
+
+ VisitStatements(fun->body());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitDeclarations(
+ ZoneList<Declaration*>* decls) {
+ for (int i = 0; i < decls->length(); i++) {
+ Visit(decls->at(i));
+ CHECK_BAILOUT;
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitStatements(ZoneList<Statement*>* stmts) {
+ for (int i = 0, len = stmts->length(); i < len; i++) {
+ Visit(stmts->at(i));
+ CHECK_BAILOUT;
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitDeclaration(Declaration* decl) {
+ Property* prop = decl->proxy()->AsProperty();
+ if (prop != NULL) {
+ Visit(prop->obj());
+ Visit(prop->key());
+ }
+
+ if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitBlock(Block* stmt) {
+ VisitStatements(stmt->statements());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitExpressionStatement(
+ ExpressionStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitEmptyStatement(EmptyStatement* stmt) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitIfStatement(IfStatement* stmt) {
+ Visit(stmt->condition());
+ CHECK_BAILOUT;
+ Visit(stmt->then_statement());
+ CHECK_BAILOUT;
+ Visit(stmt->else_statement());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitContinueStatement(ContinueStatement* stmt) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitBreakStatement(BreakStatement* stmt) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitReturnStatement(ReturnStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitWithEnterStatement(
+ WithEnterStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitWithExitStatement(WithExitStatement* stmt) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitSwitchStatement(SwitchStatement* stmt) {
+ BAILOUT("SwitchStatement");
+}
+
+
+void FullCodeGenSyntaxChecker::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ Visit(stmt->cond());
+ CHECK_BAILOUT;
+ Visit(stmt->body());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitWhileStatement(WhileStatement* stmt) {
+ Visit(stmt->cond());
+ CHECK_BAILOUT;
+ Visit(stmt->body());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitForStatement(ForStatement* stmt) {
+ if (!FLAG_always_full_compiler) BAILOUT("ForStatement");
+ if (stmt->init() != NULL) {
+ Visit(stmt->init());
+ CHECK_BAILOUT;
+ }
+ if (stmt->cond() != NULL) {
+ Visit(stmt->cond());
+ CHECK_BAILOUT;
+ }
+ Visit(stmt->body());
+ if (stmt->next() != NULL) {
+ CHECK_BAILOUT;
+ Visit(stmt->next());
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitForInStatement(ForInStatement* stmt) {
+ BAILOUT("ForInStatement");
+}
+
+
+void FullCodeGenSyntaxChecker::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ Visit(stmt->try_block());
+ CHECK_BAILOUT;
+ Visit(stmt->catch_block());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitTryFinallyStatement(
+ TryFinallyStatement* stmt) {
+ Visit(stmt->try_block());
+ CHECK_BAILOUT;
+ Visit(stmt->finally_block());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitDebuggerStatement(
+ DebuggerStatement* stmt) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* expr) {
+ BAILOUT("FunctionBoilerplateLiteral");
+}
+
+
+void FullCodeGenSyntaxChecker::VisitConditional(Conditional* expr) {
+ Visit(expr->condition());
+ CHECK_BAILOUT;
+ Visit(expr->then_expression());
+ CHECK_BAILOUT;
+ Visit(expr->else_expression());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitSlot(Slot* expr) {
+ UNREACHABLE();
+}
+
+
+void FullCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitLiteral(Literal* expr) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitRegExpLiteral(RegExpLiteral* expr) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitObjectLiteral(ObjectLiteral* expr) {
+ ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
+
+ for (int i = 0, len = properties->length(); i < len; i++) {
+ ObjectLiteral::Property* property = properties->at(i);
+ if (property->IsCompileTimeValue()) continue;
+ Visit(property->key());
+ CHECK_BAILOUT;
+ Visit(property->value());
+ CHECK_BAILOUT;
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitArrayLiteral(ArrayLiteral* expr) {
+ ZoneList<Expression*>* subexprs = expr->values();
+ for (int i = 0, len = subexprs->length(); i < len; i++) {
+ Expression* subexpr = subexprs->at(i);
+ if (subexpr->AsLiteral() != NULL) continue;
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+ Visit(subexpr);
+ CHECK_BAILOUT;
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCatchExtensionObject(
+ CatchExtensionObject* expr) {
+ Visit(expr->key());
+ CHECK_BAILOUT;
+ Visit(expr->value());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) {
+ Token::Value op = expr->op();
+ if (op == Token::INIT_CONST) BAILOUT("initialize constant");
+
+ Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(var == NULL || prop == NULL);
+ if (var != NULL) {
+ if (var->mode() == Variable::CONST) BAILOUT("Assignment to const");
+ // All other variables are supported.
+ } else if (prop != NULL) {
+ Visit(prop->obj());
+ CHECK_BAILOUT;
+ Visit(prop->key());
+ CHECK_BAILOUT;
+ } else {
+    // This case compiles down to throwing a reference error.
+ BAILOUT("non-variable/non-property assignment");
+ }
+
+ Visit(expr->value());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitThrow(Throw* expr) {
+ Visit(expr->exception());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitProperty(Property* expr) {
+ Visit(expr->obj());
+ CHECK_BAILOUT;
+ Visit(expr->key());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCall(Call* expr) {
+ Expression* fun = expr->expression();
+ ZoneList<Expression*>* args = expr->arguments();
+ Variable* var = fun->AsVariableProxy()->AsVariable();
+
+ // Check for supported calls
+ if (var != NULL && var->is_possibly_eval()) {
+ BAILOUT("call to the identifier 'eval'");
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // Calls to global variables are supported.
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ BAILOUT("call to a lookup slot");
+ } else if (fun->AsProperty() != NULL) {
+ Property* prop = fun->AsProperty();
+ Visit(prop->obj());
+ CHECK_BAILOUT;
+ Visit(prop->key());
+ CHECK_BAILOUT;
+ } else {
+ // Otherwise the call is supported if the function expression is.
+ Visit(fun);
+ }
+ // Check all arguments to the call.
+ for (int i = 0; i < args->length(); i++) {
+ Visit(args->at(i));
+ CHECK_BAILOUT;
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCallNew(CallNew* expr) {
+ Visit(expr->expression());
+ CHECK_BAILOUT;
+ ZoneList<Expression*>* args = expr->arguments();
+ // Check all arguments to the call
+ for (int i = 0; i < args->length(); i++) {
+ Visit(args->at(i));
+ CHECK_BAILOUT;
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCallRuntime(CallRuntime* expr) {
+ // Check for inline runtime call
+ if (expr->name()->Get(0) == '_' &&
+ CodeGenerator::FindInlineRuntimeLUT(expr->name()) != NULL) {
+ BAILOUT("inlined runtime call");
+ }
+ // Check all arguments to the call. (Relies on TEMP meaning STACK.)
+ for (int i = 0; i < expr->arguments()->length(); i++) {
+ Visit(expr->arguments()->at(i));
+ CHECK_BAILOUT;
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::ADD:
+ case Token::BIT_NOT:
+ case Token::NOT:
+ case Token::SUB:
+ case Token::TYPEOF:
+ case Token::VOID:
+ Visit(expr->expression());
+ break;
+ case Token::DELETE:
+ BAILOUT("UnaryOperation: DELETE");
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCountOperation(CountOperation* expr) {
+ Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+ Property* prop = expr->expression()->AsProperty();
+ ASSERT(var == NULL || prop == NULL);
+ if (var != NULL) {
+ // All global variables are supported.
+ if (!var->is_global()) {
+ ASSERT(var->slot() != NULL);
+ Slot::Type type = var->slot()->type();
+ if (type == Slot::LOOKUP) {
+ BAILOUT("CountOperation with lookup slot");
+ }
+ }
+ } else if (prop != NULL) {
+ Visit(prop->obj());
+ CHECK_BAILOUT;
+ Visit(prop->key());
+ CHECK_BAILOUT;
+ } else {
+    // This case compiles down to throwing a reference error.
+ BAILOUT("CountOperation non-variable/non-property expression");
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) {
+ Visit(expr->left());
+ CHECK_BAILOUT;
+ Visit(expr->right());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCompareOperation(CompareOperation* expr) {
+ Visit(expr->left());
+ CHECK_BAILOUT;
+ Visit(expr->right());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
+ // Supported.
+}
+
+#undef BAILOUT
+#undef CHECK_BAILOUT
+
+
+#define __ ACCESS_MASM(masm())
+
+Handle<Code> FullCodeGenerator::MakeCode(FunctionLiteral* fun,
+ Handle<Script> script,
+ bool is_eval) {
+ if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ int len = String::cast(script->source())->length();
+ Counters::total_full_codegen_source_size.Increment(len);
+ }
+ CodeGenerator::MakeCodePrologue(fun);
+ const int kInitialBufferSize = 4 * KB;
+ MacroAssembler masm(NULL, kInitialBufferSize);
+ FullCodeGenerator cgen(&masm, script, is_eval);
+ cgen.Generate(fun, PRIMARY);
+ if (cgen.HasStackOverflow()) {
+ ASSERT(!Top::has_pending_exception());
+ return Handle<Code>::null();
+ }
+ Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
+ return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script);
+}
+
+
+int FullCodeGenerator::SlotOffset(Slot* slot) {
+ ASSERT(slot != NULL);
+ // Offset is negative because higher indexes are at lower addresses.
+ int offset = -slot->index() * kPointerSize;
+ // Adjust by a (parameter or local) base offset.
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ offset += (function_->scope()->num_parameters() + 1) * kPointerSize;
+ break;
+ case Slot::LOCAL:
+ offset += JavaScriptFrameConstants::kLocal0Offset;
+ break;
+ case Slot::CONTEXT:
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ }
+ return offset;
+}
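+
+// Illustrative arithmetic, assuming a 4-byte kPointerSize: in a function
+// with two parameters, the parameter slot with index 0 yields
+// -0 * 4 + (2 + 1) * 4 = 12, i.e. the operand [fp + 12], while local slot 1
+// yields -1 * 4 + kLocal0Offset, one word below local 0.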
+
+
+void FullCodeGenerator::VisitDeclarations(
+ ZoneList<Declaration*>* declarations) {
+ int length = declarations->length();
+ int globals = 0;
+ for (int i = 0; i < length; i++) {
+ Declaration* decl = declarations->at(i);
+ Variable* var = decl->proxy()->var();
+ Slot* slot = var->slot();
+
+ // If it was not possible to allocate the variable at compile
+ // time, we need to "declare" it at runtime to make sure it
+ // actually exists in the local context.
+ if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
+ VisitDeclaration(decl);
+ } else {
+ // Count global variables and functions for later processing
+ globals++;
+ }
+ }
+
+ // Compute array of global variable and function declarations.
+  // Do nothing if no global functions or variables are declared.
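+  // The array stores flattened (name, value) pairs: slot 2*k holds the
+  // declared name and slot 2*k + 1 holds the function, undefined, or (for
+  // a const) the hole, as filled in below.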
+ if (globals > 0) {
+ Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
+ for (int j = 0, i = 0; i < length; i++) {
+ Declaration* decl = declarations->at(i);
+ Variable* var = decl->proxy()->var();
+ Slot* slot = var->slot();
+
+ if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
+ array->set(j++, *(var->name()));
+ if (decl->fun() == NULL) {
+ if (var->mode() == Variable::CONST) {
+            // In case this is a const property, use the hole.
+ array->set_the_hole(j++);
+ } else {
+ array->set_undefined(j++);
+ }
+ } else {
+ Handle<JSFunction> function =
+ Compiler::BuildBoilerplate(decl->fun(), script_, this);
+ // Check for stack-overflow exception.
+ if (HasStackOverflow()) return;
+ array->set(j++, *function);
+ }
+ }
+ }
+ // Invoke the platform-dependent code generator to do the actual
+    // declaration of the global variables and functions.
+ DeclareGlobals(array);
+ }
+}
+
+
+void FullCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
+ if (FLAG_debug_info) {
+ CodeGenerator::RecordPositions(masm_, fun->start_position());
+ }
+}
+
+
+void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
+ if (FLAG_debug_info) {
+ CodeGenerator::RecordPositions(masm_, fun->end_position());
+ }
+}
+
+
+void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
+ if (FLAG_debug_info) {
+ CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
+ }
+}
+
+
+void FullCodeGenerator::SetStatementPosition(int pos) {
+ if (FLAG_debug_info) {
+ CodeGenerator::RecordPositions(masm_, pos);
+ }
+}
+
+
+void FullCodeGenerator::SetSourcePosition(int pos) {
+ if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
+ masm_->RecordPosition(pos);
+ }
+}
+
+
+void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
+ Label eval_right, done;
+
+ // Set up the appropriate context for the left subexpression based
+ // on the operation and our own context. Initially assume we can
+ // inherit both true and false labels from our context.
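+  //
+  // Illustrative example: compiling 'a || b' for value, the kValue case
+  // below visits the left operand with VisitForValueControl, so a true 'a'
+  // jumps straight to 'done' with its value already in location_, while a
+  // false 'a' falls through to eval_right and 'b' supplies the result.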
+ if (expr->op() == Token::OR) {
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ VisitForControl(expr->left(), &done, &eval_right);
+ break;
+ case Expression::kValue:
+ VisitForValueControl(expr->left(),
+ location_,
+ &done,
+ &eval_right);
+ break;
+ case Expression::kTest:
+ VisitForControl(expr->left(), true_label_, &eval_right);
+ break;
+ case Expression::kValueTest:
+ VisitForValueControl(expr->left(),
+ location_,
+ true_label_,
+ &eval_right);
+ break;
+ case Expression::kTestValue:
+ VisitForControl(expr->left(), true_label_, &eval_right);
+ break;
+ }
+ } else {
+ ASSERT_EQ(Token::AND, expr->op());
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ VisitForControl(expr->left(), &eval_right, &done);
+ break;
+ case Expression::kValue:
+ VisitForControlValue(expr->left(),
+ location_,
+ &eval_right,
+ &done);
+ break;
+ case Expression::kTest:
+ VisitForControl(expr->left(), &eval_right, false_label_);
+ break;
+ case Expression::kValueTest:
+ VisitForControl(expr->left(), &eval_right, false_label_);
+ break;
+ case Expression::kTestValue:
+ VisitForControlValue(expr->left(),
+ location_,
+ &eval_right,
+ false_label_);
+ break;
+ }
+ }
+
+ __ bind(&eval_right);
+ Visit(expr->right());
+
+ __ bind(&done);
+}
+
+
+void FullCodeGenerator::VisitBlock(Block* stmt) {
+ Comment cmnt(masm_, "[ Block");
+ Breakable nested_statement(this, stmt);
+ SetStatementPosition(stmt);
+ VisitStatements(stmt->statements());
+ __ bind(nested_statement.break_target());
+}
+
+
+void FullCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+ Comment cmnt(masm_, "[ ExpressionStatement");
+ SetStatementPosition(stmt);
+ VisitForEffect(stmt->expression());
+}
+
+
+void FullCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
+ Comment cmnt(masm_, "[ EmptyStatement");
+ SetStatementPosition(stmt);
+}
+
+
+void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {
+ Comment cmnt(masm_, "[ IfStatement");
+ SetStatementPosition(stmt);
+ Label then_part, else_part, done;
+
+ // Do not worry about optimizing for empty then or else bodies.
+ VisitForControl(stmt->condition(), &then_part, &else_part);
+
+ __ bind(&then_part);
+ Visit(stmt->then_statement());
+ __ jmp(&done);
+
+ __ bind(&else_part);
+ Visit(stmt->else_statement());
+
+ __ bind(&done);
+}
+
+
+void FullCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
+ Comment cmnt(masm_, "[ ContinueStatement");
+ SetStatementPosition(stmt);
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ while (!current->IsContinueTarget(stmt->target())) {
+ stack_depth = current->Exit(stack_depth);
+ current = current->outer();
+ }
+ __ Drop(stack_depth);
+
+ Iteration* loop = current->AsIteration();
+ __ jmp(loop->continue_target());
+}
+
+
+void FullCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
+ Comment cmnt(masm_, "[ BreakStatement");
+ SetStatementPosition(stmt);
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ while (!current->IsBreakTarget(stmt->target())) {
+ stack_depth = current->Exit(stack_depth);
+ current = current->outer();
+ }
+ __ Drop(stack_depth);
+
+ Breakable* target = current->AsBreakable();
+ __ jmp(target->break_target());
+}
+
+
+void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+ Comment cmnt(masm_, "[ ReturnStatement");
+ SetStatementPosition(stmt);
+ Expression* expr = stmt->expression();
+ VisitForValue(expr, kAccumulator);
+
+ // Exit all nested statements.
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ while (current != NULL) {
+ stack_depth = current->Exit(stack_depth);
+ current = current->outer();
+ }
+ __ Drop(stack_depth);
+
+ EmitReturnSequence(stmt->statement_pos());
+}
+
+
+void FullCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
+ Comment cmnt(masm_, "[ WithEnterStatement");
+ SetStatementPosition(stmt);
+
+ VisitForValue(stmt->expression(), kStack);
+ if (stmt->is_catch_block()) {
+ __ CallRuntime(Runtime::kPushCatchContext, 1);
+ } else {
+ __ CallRuntime(Runtime::kPushContext, 1);
+ }
+ // Both runtime calls return the new context in both the context and the
+ // result registers.
+
+ // Update local stack frame context field.
+ StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
+}
+
+
+void FullCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
+ Comment cmnt(masm_, "[ WithExitStatement");
+ SetStatementPosition(stmt);
+
+ // Pop context.
+ LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+ // Update local stack frame context field.
+ StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
+}
+
+
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ Comment cmnt(masm_, "[ DoWhileStatement");
+ SetStatementPosition(stmt);
+ Label body, stack_limit_hit, stack_check_success;
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ __ bind(&body);
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ __ StackLimitCheck(&stack_limit_hit);
+ __ bind(&stack_check_success);
+
+ __ bind(loop_statement.continue_target());
+ SetStatementPosition(stmt->condition_position());
+ VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+
+ __ bind(&stack_limit_hit);
+ StackCheckStub stack_stub;
+ __ CallStub(&stack_stub);
+ __ jmp(&stack_check_success);
+
+ __ bind(loop_statement.break_target());
+
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
+ Comment cmnt(masm_, "[ WhileStatement");
+ SetStatementPosition(stmt);
+ Label body, stack_limit_hit, stack_check_success;
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // Emit the test at the bottom of the loop.
+ __ jmp(loop_statement.continue_target());
+
+ __ bind(&body);
+ Visit(stmt->body());
+
+ __ bind(loop_statement.continue_target());
+ // Check stack before looping.
+ __ StackLimitCheck(&stack_limit_hit);
+ __ bind(&stack_check_success);
+
+ VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+
+ __ bind(&stack_limit_hit);
+ StackCheckStub stack_stub;
+ __ CallStub(&stack_stub);
+ __ jmp(&stack_check_success);
+
+ __ bind(loop_statement.break_target());
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
+ Comment cmnt(masm_, "[ ForStatement");
+ SetStatementPosition(stmt);
+ Label test, body, stack_limit_hit, stack_check_success;
+
+ Iteration loop_statement(this, stmt);
+ if (stmt->init() != NULL) {
+ Visit(stmt->init());
+ }
+
+ increment_loop_depth();
+ // Emit the test at the bottom of the loop (even if empty).
+ __ jmp(&test);
+
+ __ bind(&body);
+ Visit(stmt->body());
+
+ __ bind(loop_statement.continue_target());
+
+ SetStatementPosition(stmt);
+ if (stmt->next() != NULL) {
+ Visit(stmt->next());
+ }
+
+ __ bind(&test);
+
+ // Check stack before looping.
+ __ StackLimitCheck(&stack_limit_hit);
+ __ bind(&stack_check_success);
+
+ if (stmt->cond() != NULL) {
+ VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+ } else {
+ __ jmp(&body);
+ }
+
+ __ bind(&stack_limit_hit);
+ StackCheckStub stack_stub;
+ __ CallStub(&stack_stub);
+ __ jmp(&stack_check_success);
+
+ __ bind(loop_statement.break_target());
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ Comment cmnt(masm_, "[ TryCatchStatement");
+ SetStatementPosition(stmt);
+ // The try block adds a handler to the exception handler chain
+ // before entering, and removes it again when exiting normally.
+ // If an exception is thrown during execution of the try block,
+  // control is passed to the handler, which consumes the handler entry.
+  // At that point the exception is in a register, and we store it in
+ // the temporary local variable (prints as ".catch-var") before
+ // executing the catch block. The catch block has been rewritten
+ // to introduce a new scope to bind the catch variable and to remove
+ // that scope again afterwards.
+
+ Label try_handler_setup, catch_entry, done;
+ __ Call(&try_handler_setup);
+ // Try handler code, exception in result register.
+
+ // Store exception in local .catch variable before executing catch block.
+ {
+ // The catch variable is *always* a variable proxy for a local variable.
+ Variable* catch_var = stmt->catch_var()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(catch_var);
+ Slot* variable_slot = catch_var->slot();
+ ASSERT_NOT_NULL(variable_slot);
+ ASSERT_EQ(Slot::LOCAL, variable_slot->type());
+ StoreToFrameField(SlotOffset(variable_slot), result_register());
+ }
+
+ Visit(stmt->catch_block());
+ __ jmp(&done);
+
+ // Try block code. Sets up the exception handler chain.
+ __ bind(&try_handler_setup);
+ {
+ TryCatch try_block(this, &catch_entry);
+ __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
+ Visit(stmt->try_block());
+ __ PopTryHandler();
+ }
+ __ bind(&done);
+}
+
+
+void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ Comment cmnt(masm_, "[ TryFinallyStatement");
+ SetStatementPosition(stmt);
+ // Try finally is compiled by setting up a try-handler on the stack while
+ // executing the try body, and removing it again afterwards.
+ //
+ // The try-finally construct can enter the finally block in three ways:
+ // 1. By exiting the try-block normally. This removes the try-handler and
+ // calls the finally block code before continuing.
+ // 2. By exiting the try-block with a function-local control flow transfer
+ // (break/continue/return). The site of the, e.g., break removes the
+ // try handler and calls the finally block code before continuing
+ // its outward control transfer.
+ // 3. by exiting the try-block with a thrown exception.
+ // This can happen in nested function calls. It traverses the try-handler
+ // chain and consumes the try-handler entry before jumping to the
+ // handler code. The handler code then calls the finally-block before
+ // rethrowing the exception.
+ //
+ // The finally block must assume a return address on top of the stack
+ // (or in the link register on ARM chips) and a value (return value or
+ // exception) in the result register (rax/eax/r0), both of which must
+ // be preserved. The return address isn't GC-safe, so it should be
+ // cooked before GC.
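+  //
+  // Worked example (illustrative): for 'try { return f(); } finally { g(); }'
+  // a return takes path 2: the return site's TryFinally::Exit pops the try
+  // handler and calls the finally block, g() runs with the return value
+  // preserved in the result register, and only then does the return
+  // sequence complete.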
+ Label finally_entry;
+ Label try_handler_setup;
+
+  // Jump to try-handler setup and try-block code. Use a call to put the
+  // try-handler address on the stack.
+ __ Call(&try_handler_setup);
+ // Try handler code. Return address of call is pushed on handler stack.
+ {
+ // This code is only executed during stack-handler traversal when an
+    // exception is thrown. The exception is in the result register, which
+ // is retained by the finally block.
+ // Call the finally block and then rethrow the exception.
+ __ Call(&finally_entry);
+ __ push(result_register());
+ __ CallRuntime(Runtime::kReThrow, 1);
+ }
+
+ __ bind(&finally_entry);
+ {
+ // Finally block implementation.
+ Finally finally_block(this);
+ EnterFinallyBlock();
+ Visit(stmt->finally_block());
+ ExitFinallyBlock(); // Return to the calling code.
+ }
+
+ __ bind(&try_handler_setup);
+ {
+ // Setup try handler (stack pointer registers).
+ TryFinally try_block(this, &finally_entry);
+ __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
+ Visit(stmt->try_block());
+ __ PopTryHandler();
+ }
+ // Execute the finally block on the way out.
+ __ Call(&finally_entry);
+}
+
+
+void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ Comment cmnt(masm_, "[ DebuggerStatement");
+ SetStatementPosition(stmt);
+
+ DebuggerStatementStub ces;
+ __ CallStub(&ces);
+ // Ignore the return value.
+#endif
+}
+
+
+void FullCodeGenerator::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void FullCodeGenerator::VisitConditional(Conditional* expr) {
+ Comment cmnt(masm_, "[ Conditional");
+ Label true_case, false_case, done;
+ VisitForControl(expr->condition(), &true_case, &false_case);
+
+ __ bind(&true_case);
+ Visit(expr->then_expression());
+ // If control flow falls through Visit, jump to done.
+ if (context_ == Expression::kEffect || context_ == Expression::kValue) {
+ __ jmp(&done);
+ }
+
+ __ bind(&false_case);
+ Visit(expr->else_expression());
+ // If control flow falls through Visit, merge it with true case here.
+ if (context_ == Expression::kEffect || context_ == Expression::kValue) {
+ __ bind(&done);
+ }
+}
+
+
+void FullCodeGenerator::VisitSlot(Slot* expr) {
+ // Slots do not appear directly in the AST.
+ UNREACHABLE();
+}
+
+
+void FullCodeGenerator::VisitLiteral(Literal* expr) {
+ Comment cmnt(masm_, "[ Literal");
+ Apply(context_, expr);
+}
+
+
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ Comment cmnt(masm_, "[ Assignment");
+ ASSERT(expr->op() != Token::INIT_CONST);
+ // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot. Variables with a rewrite to '.arguments' are treated as KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->target()->AsProperty();
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ VisitForValue(prop->obj(), kStack);
+ break;
+ case KEYED_PROPERTY:
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+ break;
+ }
+
+ // If we have a compound assignment: Get value of LHS expression and
+  // store it on top of the stack.
+ if (expr->is_compound()) {
+ Location saved_location = location_;
+ location_ = kStack;
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
+ Expression::kValue);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(prop);
+ __ push(result_register());
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(prop);
+ __ push(result_register());
+ break;
+ }
+ location_ = saved_location;
+ }
+
+ // Evaluate RHS expression.
+ Expression* rhs = expr->value();
+ VisitForValue(rhs, kAccumulator);
+
+ // If we have a compound assignment: Apply operator.
+ if (expr->is_compound()) {
+ Location saved_location = location_;
+ location_ = kAccumulator;
+ EmitBinaryOp(expr->binary_op(), Expression::kValue);
+ location_ = saved_location;
+ }
+
+ // Record source position before possible IC call.
+ SetSourcePosition(expr->position());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ context_);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+ // Call runtime routine to allocate the catch extension object and
+ // assign the exception value to the catch variable.
+ Comment cmnt(masm_, "[ CatchExtensionObject");
+ VisitForValue(expr->key(), kStack);
+ VisitForValue(expr->value(), kStack);
+ // Create catch extension object.
+ __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+ Apply(context_, result_register());
+}
+
+
+void FullCodeGenerator::VisitThrow(Throw* expr) {
+ Comment cmnt(masm_, "[ Throw");
+ VisitForValue(expr->exception(), kStack);
+ __ CallRuntime(Runtime::kThrow, 1);
+ // Never returns here.
+}
+
+
+int FullCodeGenerator::TryFinally::Exit(int stack_depth) {
+ // The macros used here must preserve the result register.
+ __ Drop(stack_depth);
+ __ PopTryHandler();
+ __ Call(finally_entry_);
+ return 0;
+}
+
+
+int FullCodeGenerator::TryCatch::Exit(int stack_depth) {
+ // The macros used here must preserve the result register.
+ __ Drop(stack_depth);
+ __ PopTryHandler();
+ return 0;
+}
+
+#undef __
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
new file mode 100644
index 000000000..6688ff7c5
--- /dev/null
+++ b/deps/v8/src/full-codegen.h
@@ -0,0 +1,452 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FULL_CODEGEN_H_
+#define V8_FULL_CODEGEN_H_
+
+#include "v8.h"
+
+#include "ast.h"
+
+namespace v8 {
+namespace internal {
+
+class FullCodeGenSyntaxChecker: public AstVisitor {
+ public:
+ FullCodeGenSyntaxChecker() : has_supported_syntax_(true) {}
+
+ void Check(FunctionLiteral* fun);
+
+ bool has_supported_syntax() { return has_supported_syntax_; }
+
+ private:
+ void VisitDeclarations(ZoneList<Declaration*>* decls);
+ void VisitStatements(ZoneList<Statement*>* stmts);
+
+ // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ bool has_supported_syntax_;
+
+ DISALLOW_COPY_AND_ASSIGN(FullCodeGenSyntaxChecker);
+};
+
+
+// -----------------------------------------------------------------------------
+// Full code generator.
+
+class FullCodeGenerator: public AstVisitor {
+ public:
+ enum Mode {
+ PRIMARY,
+ SECONDARY
+ };
+
+ FullCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
+ : masm_(masm),
+ script_(script),
+ is_eval_(is_eval),
+ function_(NULL),
+ nesting_stack_(NULL),
+ loop_depth_(0),
+ location_(kStack),
+ true_label_(NULL),
+ false_label_(NULL) {
+ }
+
+ static Handle<Code> MakeCode(FunctionLiteral* fun,
+ Handle<Script> script,
+ bool is_eval);
+
+ void Generate(FunctionLiteral* fun, Mode mode);
+
+ private:
+ class Breakable;
+ class Iteration;
+ class TryCatch;
+ class TryFinally;
+ class Finally;
+ class ForIn;
+
+ class NestedStatement BASE_EMBEDDED {
+ public:
+ explicit NestedStatement(FullCodeGenerator* codegen) : codegen_(codegen) {
+ // Link into codegen's nesting stack.
+ previous_ = codegen->nesting_stack_;
+ codegen->nesting_stack_ = this;
+ }
+ virtual ~NestedStatement() {
+ // Unlink from codegen's nesting stack.
+ ASSERT_EQ(this, codegen_->nesting_stack_);
+ codegen_->nesting_stack_ = previous_;
+ }
+
+ virtual Breakable* AsBreakable() { return NULL; }
+ virtual Iteration* AsIteration() { return NULL; }
+ virtual TryCatch* AsTryCatch() { return NULL; }
+ virtual TryFinally* AsTryFinally() { return NULL; }
+ virtual Finally* AsFinally() { return NULL; }
+ virtual ForIn* AsForIn() { return NULL; }
+
+ virtual bool IsContinueTarget(Statement* target) { return false; }
+ virtual bool IsBreakTarget(Statement* target) { return false; }
+
+ // Generate code to leave the nested statement. This includes
+ // cleaning up any stack elements in use and restoring the
+ // stack to the expectations of the surrounding statements.
+ // Takes a number of stack elements currently on top of the
+ // nested statement's stack, and returns a number of stack
+ // elements left on top of the surrounding statement's stack.
+ // The generated code must preserve the result register (which
+ // contains the value in case of a return).
+ virtual int Exit(int stack_depth) {
+ // Default implementation for the case where there is
+ // nothing to clean up.
+ return stack_depth;
+ }
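+    // Illustrative walk: for a 'break' inside a try-finally inside a loop,
+    // the break site calls Exit on each entry from the innermost outwards.
+    // TryFinally::Exit drops the accumulated elements, pops the try handler
+    // and runs the finally code, returning a depth of 0; the walk stops at
+    // the Iteration entry that is the break target, and any remaining depth
+    // is dropped before jumping there.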
+ NestedStatement* outer() { return previous_; }
+ protected:
+ MacroAssembler* masm() { return codegen_->masm(); }
+ private:
+ FullCodeGenerator* codegen_;
+ NestedStatement* previous_;
+ DISALLOW_COPY_AND_ASSIGN(NestedStatement);
+ };
+
+ class Breakable : public NestedStatement {
+ public:
+ Breakable(FullCodeGenerator* codegen,
+ BreakableStatement* break_target)
+ : NestedStatement(codegen),
+ target_(break_target) {}
+ virtual ~Breakable() {}
+ virtual Breakable* AsBreakable() { return this; }
+ virtual bool IsBreakTarget(Statement* statement) {
+ return target_ == statement;
+ }
+ BreakableStatement* statement() { return target_; }
+ Label* break_target() { return &break_target_label_; }
+ private:
+ BreakableStatement* target_;
+ Label break_target_label_;
+ DISALLOW_COPY_AND_ASSIGN(Breakable);
+ };
+
+ class Iteration : public Breakable {
+ public:
+ Iteration(FullCodeGenerator* codegen,
+ IterationStatement* iteration_statement)
+ : Breakable(codegen, iteration_statement) {}
+ virtual ~Iteration() {}
+ virtual Iteration* AsIteration() { return this; }
+ virtual bool IsContinueTarget(Statement* statement) {
+ return this->statement() == statement;
+ }
+ Label* continue_target() { return &continue_target_label_; }
+ private:
+ Label continue_target_label_;
+ DISALLOW_COPY_AND_ASSIGN(Iteration);
+ };
+
+ // The environment inside the try block of a try/catch statement.
+ class TryCatch : public NestedStatement {
+ public:
+ explicit TryCatch(FullCodeGenerator* codegen, Label* catch_entry)
+ : NestedStatement(codegen), catch_entry_(catch_entry) { }
+ virtual ~TryCatch() {}
+ virtual TryCatch* AsTryCatch() { return this; }
+ Label* catch_entry() { return catch_entry_; }
+ virtual int Exit(int stack_depth);
+ private:
+ Label* catch_entry_;
+ DISALLOW_COPY_AND_ASSIGN(TryCatch);
+ };
+
+ // The environment inside the try block of a try/finally statement.
+ class TryFinally : public NestedStatement {
+ public:
+ explicit TryFinally(FullCodeGenerator* codegen, Label* finally_entry)
+ : NestedStatement(codegen), finally_entry_(finally_entry) { }
+ virtual ~TryFinally() {}
+ virtual TryFinally* AsTryFinally() { return this; }
+ Label* finally_entry() { return finally_entry_; }
+ virtual int Exit(int stack_depth);
+ private:
+ Label* finally_entry_;
+ DISALLOW_COPY_AND_ASSIGN(TryFinally);
+ };
+
+  // A Finally entry represents being inside a finally block.
+ // Abnormal termination of the finally block needs to clean up
+ // the block's parameters from the stack.
+ class Finally : public NestedStatement {
+ public:
+ explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) { }
+ virtual ~Finally() {}
+ virtual Finally* AsFinally() { return this; }
+ virtual int Exit(int stack_depth) {
+ return stack_depth + kFinallyStackElementCount;
+ }
+ private:
+ // Number of extra stack slots occupied during a finally block.
+ static const int kFinallyStackElementCount = 2;
+ DISALLOW_COPY_AND_ASSIGN(Finally);
+ };
+
+  // A ForIn entry represents being inside a for-in loop.
+ // Abnormal termination of the for-in block needs to clean up
+ // the block's temporary storage from the stack.
+ class ForIn : public Iteration {
+ public:
+ ForIn(FullCodeGenerator* codegen,
+ ForInStatement* statement)
+ : Iteration(codegen, statement) { }
+ virtual ~ForIn() {}
+ virtual ForIn* AsForIn() { return this; }
+ virtual int Exit(int stack_depth) {
+ return stack_depth + kForInStackElementCount;
+ }
+ private:
+ // TODO(lrn): Check that this value is correct when implementing
+ // for-in.
+ static const int kForInStackElementCount = 5;
+ DISALLOW_COPY_AND_ASSIGN(ForIn);
+ };
+
+ enum Location {
+ kAccumulator,
+ kStack
+ };
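+
+  // kAccumulator: the value lives in result_register() (eax/rax/r0).
+  // kStack: the value sits on top of the operand stack.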
+
+ int SlotOffset(Slot* slot);
+
+ // Emit code to convert a pure value (in a register, slot, as a literal,
+ // or on top of the stack) into the result expected according to an
+ // expression context.
+ void Apply(Expression::Context context, Register reg);
+
+ // Slot cannot have type Slot::LOOKUP.
+ void Apply(Expression::Context context, Slot* slot);
+
+ void Apply(Expression::Context context, Literal* lit);
+ void ApplyTOS(Expression::Context context);
+
+ // Emit code to discard count elements from the top of stack, then convert
+ // a pure value into the result expected according to an expression
+ // context.
+ void DropAndApply(int count, Expression::Context context, Register reg);
+
+ // Emit code to convert pure control flow to a pair of labels into the
+ // result expected according to an expression context.
+ void Apply(Expression::Context context,
+ Label* materialize_true,
+ Label* materialize_false);
+
+ // Helper function to convert a pure value into a test context. The value
+ // is expected on the stack or the accumulator, depending on the platform.
+ // See the platform-specific implementation for details.
+ void DoTest(Expression::Context context);
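+  //
+  // Illustrative uses (a sketch, see the platform files for the real code):
+  // Apply(Expression::kValue, reg) materializes the value in the accumulator
+  // or on the stack according to location_, while Apply(Expression::kTest,
+  // reg) tests the value and branches to true_label_ or false_label_.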
+
+ void Move(Slot* dst, Register source, Register scratch1, Register scratch2);
+ void Move(Register dst, Slot* source);
+
+  // Return an operand used to read/write to a known (i.e., non-LOOKUP) slot.
+ // May emit code to traverse the context chain, destroying the scratch
+ // register.
+ MemOperand EmitSlotSearch(Slot* slot, Register scratch);
+
+ void VisitForEffect(Expression* expr) {
+ Expression::Context saved_context = context_;
+ context_ = Expression::kEffect;
+ Visit(expr);
+ context_ = saved_context;
+ }
+
+ void VisitForValue(Expression* expr, Location where) {
+ Expression::Context saved_context = context_;
+ Location saved_location = location_;
+ context_ = Expression::kValue;
+ location_ = where;
+ Visit(expr);
+ context_ = saved_context;
+ location_ = saved_location;
+ }
+
+ void VisitForControl(Expression* expr, Label* if_true, Label* if_false) {
+ Expression::Context saved_context = context_;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ context_ = Expression::kTest;
+ true_label_ = if_true;
+ false_label_ = if_false;
+ Visit(expr);
+ context_ = saved_context;
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ }
+
+ void VisitForValueControl(Expression* expr,
+ Location where,
+ Label* if_true,
+ Label* if_false) {
+ Expression::Context saved_context = context_;
+ Location saved_location = location_;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ context_ = Expression::kValueTest;
+ location_ = where;
+ true_label_ = if_true;
+ false_label_ = if_false;
+ Visit(expr);
+ context_ = saved_context;
+ location_ = saved_location;
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ }
+
+ void VisitForControlValue(Expression* expr,
+ Location where,
+ Label* if_true,
+ Label* if_false) {
+ Expression::Context saved_context = context_;
+ Location saved_location = location_;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ context_ = Expression::kTestValue;
+ location_ = where;
+ true_label_ = if_true;
+ false_label_ = if_false;
+ Visit(expr);
+ context_ = saved_context;
+ location_ = saved_location;
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ }
+
+ void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void DeclareGlobals(Handle<FixedArray> pairs);
+
+ // Platform-specific return sequence
+ void EmitReturnSequence(int position);
+
+ // Platform-specific code sequences for calls
+ void EmitCallWithStub(Call* expr);
+ void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
+
+ // Platform-specific code for loading variables.
+ void EmitVariableLoad(Variable* expr, Expression::Context context);
+
+ // Platform-specific support for compiling assignments.
+
+ // Load a value from a named property.
+ // The receiver is left on the stack by the IC.
+ void EmitNamedPropertyLoad(Property* expr);
+
+ // Load a value from a keyed property.
+  // The receiver and the key are left on the stack by the IC.
+ void EmitKeyedPropertyLoad(Property* expr);
+
+ // Apply the compound assignment operator. Expects the left operand on top
+ // of the stack and the right one in the accumulator.
+ void EmitBinaryOp(Token::Value op, Expression::Context context);
+
+ // Complete a variable assignment. The right-hand-side value is expected
+ // in the accumulator.
+ void EmitVariableAssignment(Variable* var, Expression::Context context);
+
+ // Complete a named property assignment. The receiver is expected on top
+ // of the stack and the right-hand-side value in the accumulator.
+ void EmitNamedPropertyAssignment(Assignment* expr);
+
+ // Complete a keyed property assignment. The receiver and key are
+ // expected on top of the stack and the right-hand-side value in the
+ // accumulator.
+ void EmitKeyedPropertyAssignment(Assignment* expr);
+
+ void SetFunctionPosition(FunctionLiteral* fun);
+ void SetReturnPosition(FunctionLiteral* fun);
+ void SetStatementPosition(Statement* stmt);
+ void SetStatementPosition(int pos);
+ void SetSourcePosition(int pos);
+
+ // Non-local control flow support.
+ void EnterFinallyBlock();
+ void ExitFinallyBlock();
+
+ // Loop nesting counter.
+ int loop_depth() { return loop_depth_; }
+ void increment_loop_depth() { loop_depth_++; }
+ void decrement_loop_depth() {
+ ASSERT(loop_depth_ > 0);
+ loop_depth_--;
+ }
+
+ MacroAssembler* masm() { return masm_; }
+ static Register result_register();
+ static Register context_register();
+
+ // Set fields in the stack frame. Offsets are the frame pointer relative
+ // offsets defined in, e.g., StandardFrameConstants.
+ void StoreToFrameField(int frame_offset, Register value);
+
+ // Load a value from the current context. Indices are defined as an enum
+ // in v8::internal::Context.
+ void LoadContextField(Register dst, int context_index);
+
+ // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  // Handles the short-circuited logical binary operations in
+  // VisitBinaryOperation.
+ void EmitLogicalOperation(BinaryOperation* expr);
+
+ MacroAssembler* masm_;
+ Handle<Script> script_;
+ bool is_eval_;
+
+ FunctionLiteral* function_;
+
+ Label return_label_;
+ NestedStatement* nesting_stack_;
+ int loop_depth_;
+
+ Expression::Context context_;
+ Location location_;
+ Label* true_label_;
+ Label* false_label_;
+
+ friend class NestedStatement;
+
+ DISALLOW_COPY_AND_ASSIGN(FullCodeGenerator);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_FULL_CODEGEN_H_
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index a2cd2e454..39f6bcb26 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -204,6 +204,7 @@ class AccessorInfo;
class Allocation;
class Arguments;
class Assembler;
+class AssertNoAllocation;
class BreakableStatement;
class Code;
class CodeGenerator;
@@ -379,6 +380,12 @@ enum InLoopFlag {
};
+enum CallFunctionFlags {
+ NO_CALL_FUNCTION_FLAGS = 0,
+ RECEIVER_MIGHT_BE_VALUE = 1 << 0 // Receiver might not be a JSObject.
+};
+
+
// Type of properties.
// Order of properties is significant.
// Must fit in the BitField PropertyDetails::TypeField.
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index d551e21c5..c66056ebb 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -31,6 +31,7 @@
#include "api.h"
#include "arguments.h"
#include "bootstrapper.h"
+#include "codegen.h"
#include "compiler.h"
#include "debug.h"
#include "execution.h"
@@ -666,31 +667,52 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
}
-bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag,
- int loop_nesting) {
+bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
+ ClearExceptionFlag flag) {
+ return shared->is_compiled() || CompileLazyShared(shared, flag);
+}
+
+
+static bool CompileLazyHelper(CompilationInfo* info,
+ ClearExceptionFlag flag) {
// Compile the source information to a code object.
- ASSERT(!shared->is_compiled());
- bool result = Compiler::CompileLazy(shared, loop_nesting);
+ ASSERT(!info->shared_info()->is_compiled());
+ bool result = Compiler::CompileLazy(info);
ASSERT(result != Top::has_pending_exception());
if (!result && flag == CLEAR_EXCEPTION) Top::clear_pending_exception();
return result;
}
-bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag) {
- // Compile the source information to a code object.
+bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
+ ClearExceptionFlag flag) {
+ CompilationInfo info(shared, Handle<Object>::null(), 0);
+ return CompileLazyHelper(&info, flag);
+}
+
+
+bool CompileLazy(Handle<JSFunction> function,
+ Handle<Object> receiver,
+ ClearExceptionFlag flag) {
Handle<SharedFunctionInfo> shared(function->shared());
- return CompileLazyShared(shared, flag, 0);
+ CompilationInfo info(shared, receiver, 0);
+ bool result = CompileLazyHelper(&info, flag);
+ LOG(FunctionCreateEvent(*function));
+ return result;
}
-bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag) {
- // Compile the source information to a code object.
+bool CompileLazyInLoop(Handle<JSFunction> function,
+ Handle<Object> receiver,
+ ClearExceptionFlag flag) {
Handle<SharedFunctionInfo> shared(function->shared());
- return CompileLazyShared(shared, flag, 1);
+ CompilationInfo info(shared, receiver, 1);
+ bool result = CompileLazyHelper(&info, flag);
+ LOG(FunctionCreateEvent(*function));
+ return result;
}
+
OptimizedObjectForAddingMultipleProperties::
OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
int expected_additional_properties,
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index fe820d59e..04f087bd8 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -313,12 +313,19 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
// false if the compilation resulted in a stack overflow.
enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
+bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
+ ClearExceptionFlag flag);
+
bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag,
- int loop_nesting);
+ ClearExceptionFlag flag);
+
+bool CompileLazy(Handle<JSFunction> function,
+ Handle<Object> receiver,
+ ClearExceptionFlag flag);
-bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
-bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);
+bool CompileLazyInLoop(Handle<JSFunction> function,
+ Handle<Object> receiver,
+ ClearExceptionFlag flag);
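+
+// Illustrative call site (a sketch, not code from this tree): callers now
+// thread the receiver through, e.g.
+//   CompileLazy(function, receiver, KEEP_EXCEPTION);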
// Returns the lazy compilation stub for argc arguments.
Handle<Code> ComputeLazyCompile(int argc);
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index bd4f86bb9..f18bf0f61 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -152,7 +152,11 @@ Object* Heap::AllocateRawCell() {
bool Heap::InNewSpace(Object* object) {
- return new_space_.Contains(object);
+ bool result = new_space_.Contains(object);
+ ASSERT(!result || // Either not in new space
+ gc_state_ != NOT_IN_GC || // ... or in the middle of GC
+ InToSpace(object)); // ... or in to-space (where we allocate).
+ return result;
}
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index b61505562..3cb65eeec 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -625,8 +625,7 @@ void HeapProfiler::WriteSample() {
ConstructorHeapProfile js_cons_profile;
RetainerHeapProfile js_retainer_profile;
HeapIterator iterator;
- while (iterator.has_next()) {
- HeapObject* obj = iterator.next();
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
CollectStats(obj, info);
js_cons_profile.CollectStats(obj);
js_retainer_profile.CollectStats(obj);
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index fba2e87c2..5f4d81501 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -76,8 +76,8 @@ int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
-int Heap::max_semispace_size_ = 512*KB;
-int Heap::max_old_generation_size_ = 128*MB;
+int Heap::max_semispace_size_ = 2*MB;
+int Heap::max_old_generation_size_ = 192*MB;
int Heap::initial_semispace_size_ = 128*KB;
size_t Heap::code_range_size_ = 0;
#elif defined(V8_TARGET_ARCH_X64)
@@ -327,7 +327,7 @@ void Heap::GarbageCollectionPrologue() {
int Heap::SizeOfObjects() {
int total = 0;
AllSpaces spaces;
- while (Space* space = spaces.next()) {
+ for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
total += space->Size();
}
return total;
@@ -732,13 +732,14 @@ static void VerifyNonPointerSpacePointers() {
// do not expect them.
VerifyNonPointerSpacePointersVisitor v;
HeapObjectIterator code_it(Heap::code_space());
- while (code_it.has_next()) {
- HeapObject* object = code_it.next();
+ for (HeapObject* object = code_it.next();
+ object != NULL; object = code_it.next())
object->Iterate(&v);
- }
HeapObjectIterator data_it(Heap::old_data_space());
- while (data_it.has_next()) data_it.next()->Iterate(&v);
+ for (HeapObject* object = data_it.next();
+ object != NULL; object = data_it.next())
+ object->Iterate(&v);
}
#endif
@@ -804,8 +805,8 @@ void Heap::Scavenge() {
// Copy objects reachable from cells by scavenging cell values directly.
HeapObjectIterator cell_iterator(cell_space_);
- while (cell_iterator.has_next()) {
- HeapObject* cell = cell_iterator.next();
+ for (HeapObject* cell = cell_iterator.next();
+ cell != NULL; cell = cell_iterator.next()) {
if (cell->IsJSGlobalPropertyCell()) {
Address value_address =
reinterpret_cast<Address>(cell) +
@@ -1013,13 +1014,15 @@ void Heap::RebuildRSets() {
void Heap::RebuildRSets(PagedSpace* space) {
HeapObjectIterator it(space);
- while (it.has_next()) Heap::UpdateRSet(it.next());
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+ Heap::UpdateRSet(obj);
}
void Heap::RebuildRSets(LargeObjectSpace* space) {
LargeObjectIterator it(space);
- while (it.has_next()) Heap::UpdateRSet(it.next());
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+ Heap::UpdateRSet(obj);
}
@@ -1182,7 +1185,10 @@ Object* Heap::AllocatePartialMap(InstanceType instance_type,
reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
reinterpret_cast<Map*>(result)->set_inobject_properties(0);
+ reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
+ reinterpret_cast<Map*>(result)->set_bit_field(0);
+ reinterpret_cast<Map*>(result)->set_bit_field2(0);
return result;
}
@@ -1203,7 +1209,7 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
map->set_code_cache(empty_fixed_array());
map->set_unused_property_fields(0);
map->set_bit_field(0);
- map->set_bit_field2(0);
+ map->set_bit_field2(1 << Map::kIsExtensible);
   // If the map object is aligned, fill the padding area with Smi 0 objects.
if (Map::kPadStart < Map::kSize) {
@@ -1493,8 +1499,8 @@ void Heap::CreateRegExpCEntryStub() {
void Heap::CreateCEntryDebugBreakStub() {
- CEntryDebugBreakStub stub;
- set_c_entry_debug_break_code(*stub.GetCode());
+ DebuggerStatementStub stub;
+ set_debugger_statement_code(*stub.GetCode());
}
@@ -1520,8 +1526,8 @@ void Heap::CreateFixedStubs() {
// { CEntryStub stub;
// c_entry_code_ = *stub.GetCode();
// }
- // { CEntryDebugBreakStub stub;
- // c_entry_debug_break_code_ = *stub.GetCode();
+ // { DebuggerStatementStub stub;
+ // debugger_statement_code_ = *stub.GetCode();
// }
// To workaround the problem, make separate functions without inlining.
Heap::CreateCEntryStub();
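Editor's note: the comment block above survives the stub rename and records a subtle rule: each stub is created in its own non-inlined function so that no raw Code pointer stays live across a second, possibly GC-triggering allocation. A compact sketch of the hazard, with all names hypothetical:

    struct Code { int dummy; };

    // Stands in for an allocation that may run a moving GC.
    static Code* AllocateCode() { static Code c; return &c; }

    static void CreateStubA() { Code* a = AllocateCode(); (void)a; }
    static void CreateStubB() { Code* b = AllocateCode(); (void)b; }

    // Risky if written inline in one function:
    //   Code* a = AllocateCode();  // raw pointer...
    //   Code* b = AllocateCode();  // ...still live across an allocating call
    // The workaround in the patch: one non-inlined function per stub.
    static void CreateBoth() { CreateStubA(); CreateStubB(); }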
@@ -1723,7 +1729,7 @@ void Heap::SetNumberStringCache(Object* number, String* string) {
int mask = (number_string_cache()->length() >> 1) - 1;
if (number->IsSmi()) {
hash = smi_get_hash(Smi::cast(number)) & mask;
- number_string_cache()->set(hash * 2, number, SKIP_WRITE_BARRIER);
+ number_string_cache()->set(hash * 2, Smi::cast(number));
} else {
hash = double_get_hash(number->Number()) & mask;
number_string_cache()->set(hash * 2, number);
@@ -1980,8 +1986,10 @@ Object* Heap::AllocateConsString(String* first, String* second) {
Object* result = Allocate(map, NEW_SPACE);
if (result->IsFailure()) return result;
+
+ AssertNoAllocation no_gc;
ConsString* cons_string = ConsString::cast(result);
- WriteBarrierMode mode = cons_string->GetWriteBarrierMode();
+ WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
cons_string->set_length(length);
cons_string->set_hash_field(String::kEmptyHashField);
cons_string->set_first(first, mode);
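Editor's note: GetWriteBarrierMode() now takes an AssertNoAllocation scope object, so the assumption "no GC can run between the mode query and the stores that use it" is expressed as a token the caller must hold. A short sketch of the idiom; the names here are illustrative, not V8's declarations:

    class NoAllocScope {};  // its lifetime documents "no GC can run here"

    enum StoreMode { SKIP_BARRIER, FULL_BARRIER };

    // Only callable while a NoAllocScope exists, so the cached answer cannot
    // be invalidated by a moving GC before the stores that rely on it.
    static StoreMode CheapStoreMode(const NoAllocScope&) { return SKIP_BARRIER; }

    static void CopyAll(int* dst, const int* src, int n) {
      NoAllocScope no_gc;
      StoreMode mode = CheapStoreMode(no_gc);
      for (int i = 0; i < n; i++) dst[i] = src[i];  // stores covered by the scope
      (void)mode;
    }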
@@ -2279,7 +2287,7 @@ Object* Heap::InitializeFunction(JSFunction* function,
function->set_shared(shared);
function->set_prototype_or_initial_map(prototype);
function->set_context(undefined_value());
- function->set_literals(empty_fixed_array(), SKIP_WRITE_BARRIER);
+ function->set_literals(empty_fixed_array());
return function;
}
@@ -2398,8 +2406,10 @@ Object* Heap::AllocateInitialMap(JSFunction* fun) {
String* name = fun->shared()->GetThisPropertyAssignmentName(i);
ASSERT(name->IsSymbol());
FieldDescriptor field(name, i, NONE);
+ field.SetEnumerationIndex(i);
descriptors->Set(i, &field);
}
+ descriptors->SetNextEnumerationIndex(count);
descriptors->Sort();
map->set_instance_descriptors(descriptors);
map->set_pre_allocated_property_fields(count);
@@ -2880,8 +2890,10 @@ Object* Heap::CopyFixedArray(FixedArray* src) {
HeapObject::cast(obj)->set_map(src->map());
FixedArray* result = FixedArray::cast(obj);
result->set_length(len);
+
// Copy the content
- WriteBarrierMode mode = result->GetWriteBarrierMode();
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
return result;
}
@@ -2899,6 +2911,7 @@ Object* Heap::AllocateFixedArray(int length) {
Object* value = undefined_value();
// Initialize body.
for (int index = 0; index < length; index++) {
+ ASSERT(!Heap::InNewSpace(value)); // value = undefined
array->set(index, value, SKIP_WRITE_BARRIER);
}
}
@@ -2954,6 +2967,7 @@ Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
array->set_length(length);
Object* value = undefined_value();
for (int index = 0; index < length; index++) {
+ ASSERT(!Heap::InNewSpace(value)); // value = undefined
array->set(index, value, SKIP_WRITE_BARRIER);
}
return array;
@@ -2971,6 +2985,7 @@ Object* Heap::AllocateFixedArrayWithHoles(int length) {
// Initialize body.
Object* value = the_hole_value();
for (int index = 0; index < length; index++) {
+ ASSERT(!Heap::InNewSpace(value)); // value = the hole
array->set(index, value, SKIP_WRITE_BARRIER);
}
}
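Editor's note: the three new ASSERTs in this file all document the same invariant: the write barrier exists to record old-to-new pointers for the scavenger, so it may be skipped only when the stored value provably lives outside new space, as undefined and the hole do. A tiny model of that rule, with hypothetical names:

    #include <cassert>

    // Stand-in for the generational-heap membership test.
    static bool InNewSpace(const void* /*p*/) { return false; }

    // Sound without a barrier: no old-to-new edge can be created when the
    // stored value is an old-space immortal such as undefined or the hole.
    static void FillNoBarrier(void** slots, int n, void* immortal_value) {
      assert(!InNewSpace(immortal_value));  // mirrors the ASSERTs above
      for (int i = 0; i < n; i++) slots[i] = immortal_value;
    }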
@@ -3106,7 +3121,8 @@ void Heap::Print() {
if (!HasBeenSetup()) return;
Top::PrintStack();
AllSpaces spaces;
- while (Space* space = spaces.next()) space->Print();
+ for (Space* space = spaces.next(); space != NULL; space = spaces.next())
+ space->Print();
}
@@ -3340,6 +3356,11 @@ void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
+ IterateWeakRoots(v, mode);
+}
+
+
+void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
v->Synchronize("symbol_table");
if (mode != VISIT_ALL_IN_SCAVENGE) {
@@ -3394,6 +3415,20 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// Iterate over pointers being held by inactive threads.
ThreadManager::Iterate(v);
v->Synchronize("threadmanager");
+
+ // Iterate over the pointers the Serialization/Deserialization code is
+ // holding.
+ // During garbage collection this keeps the partial snapshot cache alive.
+ // During deserialization of the startup snapshot this creates the partial
+ // snapshot cache and deserializes the objects it refers to. During
+ // serialization this does nothing, since the partial snapshot cache is
+  // empty.  However, the next thing we do is create the partial snapshot,
+ // filling up the partial snapshot cache with objects it needs as we go.
+ SerializerDeserializer::Iterate(v);
+ // We don't do a v->Synchronize call here, because in debug mode that will
+  // output a flag to the snapshot.  However, at this point the serializer and
+ // deserializer are deliberately a little unsynchronized (see above) so the
+ // checking of the sync flag in the snapshot would fail.
}
@@ -3544,7 +3579,8 @@ bool Heap::Setup(bool create_heap_objects) {
// Initialize map space.
map_space_ = new MapSpace(FLAG_use_big_map_space
? max_old_generation_size_
- : (MapSpace::kMaxMapPageIndex + 1) * Page::kPageSize,
+ : MapSpace::kMaxMapPageIndex * Page::kPageSize,
+ FLAG_max_map_space_pages,
MAP_SPACE);
if (map_space_ == NULL) return false;
if (!map_space_->Setup(NULL, 0)) return false;
@@ -3647,7 +3683,8 @@ void Heap::TearDown() {
void Heap::Shrink() {
// Try to shrink all paged spaces.
PagedSpaces spaces;
- while (PagedSpace* space = spaces.next()) space->Shrink();
+ for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
+ space->Shrink();
}
@@ -3656,7 +3693,8 @@ void Heap::Shrink() {
void Heap::Protect() {
if (HasBeenSetup()) {
AllSpaces spaces;
- while (Space* space = spaces.next()) space->Protect();
+ for (Space* space = spaces.next(); space != NULL; space = spaces.next())
+ space->Protect();
}
}
@@ -3664,7 +3702,8 @@ void Heap::Protect() {
void Heap::Unprotect() {
if (HasBeenSetup()) {
AllSpaces spaces;
- while (Space* space = spaces.next()) space->Unprotect();
+ for (Space* space = spaces.next(); space != NULL; space = spaces.next())
+ space->Unprotect();
}
}
@@ -3836,34 +3875,25 @@ void HeapIterator::Shutdown() {
}
-bool HeapIterator::has_next() {
+HeapObject* HeapIterator::next() {
// No iterator means we are done.
- if (object_iterator_ == NULL) return false;
+ if (object_iterator_ == NULL) return NULL;
- if (object_iterator_->has_next_object()) {
+ if (HeapObject* obj = object_iterator_->next_object()) {
     // If the current iterator has more objects, we are fine.
- return true;
+ return obj;
} else {
     // Go through the spaces looking for one that has objects.
while (space_iterator_->has_next()) {
object_iterator_ = space_iterator_->next();
- if (object_iterator_->has_next_object()) {
- return true;
+ if (HeapObject* obj = object_iterator_->next_object()) {
+ return obj;
}
}
}
// Done with the last space.
object_iterator_ = NULL;
- return false;
-}
-
-
-HeapObject* HeapIterator::next() {
- if (has_next()) {
- return object_iterator_->next_object();
- } else {
- return NULL;
- }
+ return NULL;
}
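Editor's note: the rewritten next() folds the old has_next()/next() pair into one function that flattens a two-level iteration: objects within a space, then the next space. A self-contained sketch of the same shape over ordinary containers:

    #include <cstddef>
    #include <vector>

    typedef std::vector<int> Space;

    class FlatIterator {
     public:
      explicit FlatIterator(std::vector<Space>* spaces)
          : spaces_(spaces), space_(0), index_(0) {}
      int* next() {
        while (space_ < spaces_->size()) {
          Space& s = (*spaces_)[space_];
          if (index_ < s.size()) return &s[index_++];  // inner has more
          ++space_;   // inner exhausted: advance to the next space
          index_ = 0;
        }
        return NULL;  // done with the last space
      }
     private:
      std::vector<Space>* spaces_;
      size_t space_;
      size_t index_;
    };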
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 1f044441a..cbf0b73ed 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -101,7 +101,7 @@ namespace internal {
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
V(Code, c_entry_code, CEntryCode) \
- V(Code, c_entry_debug_break_code, CEntryDebugBreakCode) \
+ V(Code, debugger_statement_code, DebuggerStatementCode) \
V(FixedArray, number_string_cache, NumberStringCache) \
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
@@ -690,6 +690,8 @@ class Heap : public AllStatic {
static void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
static void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
+  // Iterates over the weak roots in the heap.
+ static void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
// Iterates remembered set of an old space.
static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback);
@@ -1290,7 +1292,6 @@ class HeapIterator BASE_EMBEDDED {
explicit HeapIterator();
virtual ~HeapIterator();
- bool has_next();
HeapObject* next();
void reset();
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 0e9ffeaab..dc017ae32 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -860,6 +860,24 @@ void Assembler::cmpb(const Operand& op, int8_t imm8) {
}
+void Assembler::cmpb(const Operand& dst, Register src) {
+ ASSERT(src.is_byte_register());
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x38);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::cmpb(Register dst, const Operand& src) {
+ ASSERT(dst.is_byte_register());
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x3A);
+ emit_operand(dst, src);
+}
+
+
void Assembler::cmpw(const Operand& op, Immediate imm16) {
ASSERT(imm16.is_int16());
EnsureSpace ensure_space(this);
@@ -1261,6 +1279,14 @@ void Assembler::test(Register reg, const Operand& op) {
}
+void Assembler::test_b(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x84);
+ emit_operand(reg, op);
+}
+
+
void Assembler::test(const Operand& op, const Immediate& imm) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
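Editor's note: the new byte forms use the classic x86 8-bit opcodes: 0x38 is CMP r/m8, r8; 0x3A is CMP r8, r/m8; and 0x84 is TEST r/m8, r8. On ia32 only eax, ebx, ecx, and edx have 8-bit subregisters, which is what the is_byte_register() asserts enforce. A toy emitter showing the shared opcode-plus-ModR/M shape of these instructions (not V8's Assembler):

    #include <cstdint>
    #include <vector>

    // Each instruction here is one opcode byte followed by a ModR/M byte
    // (plus any addressing bytes, omitted from this sketch).
    static void EmitByteOp(std::vector<uint8_t>* code, uint8_t opcode,
                           uint8_t modrm) {
      code->push_back(opcode);  // 0x38 / 0x3A / 0x84 in the hunks above
      code->push_back(modrm);   // selects the register and memory operands
    }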
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index f35abd575..9ce073437 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -229,8 +229,9 @@ enum ScaleFactor {
times_2 = 1,
times_4 = 2,
times_8 = 3,
- times_pointer_size = times_4,
- times_half_pointer_size = times_2
+ times_int_size = times_4,
+ times_half_pointer_size = times_2,
+ times_pointer_size = times_4
};
@@ -559,6 +560,8 @@ class Assembler : public Malloced {
void and_(const Operand& dst, const Immediate& x);
void cmpb(const Operand& op, int8_t imm8);
+  void cmpb(Register dst, const Operand& src);
+ void cmpb(const Operand& dst, Register src);
void cmpb_al(const Operand& op);
void cmpw_ax(const Operand& op);
void cmpw(const Operand& op, Immediate imm16);
@@ -624,6 +627,7 @@ class Assembler : public Malloced {
void test(Register reg, const Immediate& imm);
void test(Register reg, const Operand& op);
+ void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
void xor_(Register dst, int32_t imm32);
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index d56c02de1..2c5b1d1f5 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -36,15 +36,36 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
- // TODO(428): Don't pass the function in a static variable.
- ExternalReference passed = ExternalReference::builtin_passed_function();
- __ mov(Operand::StaticVariable(passed), edi);
-
- // The actual argument count has already been loaded into register
- // eax, but JumpToRuntime expects eax to contain the number of
- // arguments including the receiver.
- __ inc(eax);
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ CFunctionId id,
+ BuiltinExtraArguments extra_args) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments excluding receiver
+ // -- edi : called function (only guaranteed when
+ // extra_args requires it)
+ // -- esi : context
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -- ...
+ // -- esp[4 * argc] : first argument (argc == eax)
+  //  -- esp[4 * (argc + 1)] : receiver
+ // -----------------------------------
+
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ Register scratch = ebx;
+ __ pop(scratch); // Save return address.
+ __ push(edi);
+ __ push(scratch); // Restore return address.
+ } else {
+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+ // JumpToRuntime expects eax to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ add(Operand(eax), Immediate(num_extra_args + 1));
__ JumpToRuntime(ExternalReference(id));
}
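Editor's note: the rewritten adaptor passes the called function on the stack instead of through a static variable, retiring the old TODO(428). A plain-C++ model of the pop/push shuffle that slips an extra argument in beneath the return address:

    #include <vector>

    // Model: the back of 'stack' is the return address; arguments sit below.
    static void InsertExtraArg(std::vector<int>* stack, int extra_arg) {
      int return_address = stack->back();  // __ pop(scratch)
      stack->pop_back();
      stack->push_back(extra_arg);         // __ push(edi)
      stack->push_back(return_address);    // __ push(scratch)
    }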
@@ -81,7 +102,8 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
}
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function) {
// Enter a construct frame.
__ EnterConstructFrame();
@@ -277,8 +299,17 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ j(greater_equal, &loop);
// Call the function.
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION);
+ if (is_api_function) {
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ Handle<Code> code = Handle<Code>(
+ Builtins::builtin(Builtins::HandleApiCallConstruct));
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, CALL_FUNCTION);
+ } else {
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION);
+ }
// Restore context from the frame.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -319,6 +350,16 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
}
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, true);
+}
+
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Clear the context before we push it when entering the JS frame.
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 993675ba6..fe91903e5 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -103,13 +103,13 @@ CodeGenState::~CodeGenState() {
// -------------------------------------------------------------------------
// CodeGenerator implementation
-CodeGenerator::CodeGenerator(int buffer_size,
+CodeGenerator::CodeGenerator(MacroAssembler* masm,
Handle<Script> script,
bool is_eval)
: is_eval_(is_eval),
script_(script),
deferred_(8),
- masm_(new MacroAssembler(NULL, buffer_size)),
+ masm_(masm),
scope_(NULL),
frame_(NULL),
allocator_(NULL),
@@ -126,7 +126,9 @@ CodeGenerator::CodeGenerator(int buffer_size,
// edi: called JS function
// esi: callee's context
-void CodeGenerator::GenCode(FunctionLiteral* fun) {
+void CodeGenerator::Generate(FunctionLiteral* fun,
+ Mode mode,
+ CompilationInfo* info) {
// Record the position for debugging purposes.
CodeForFunctionPosition(fun);
@@ -143,7 +145,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
set_in_spilled_code(false);
// Adjust for function-level loop nesting.
- loop_nesting_ += fun->loop_nesting();
+ loop_nesting_ += info->loop_nesting();
JumpTarget::set_compiling_deferred_code(false);
@@ -167,96 +169,106 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
// edi: called JS function
// esi: callee's context
allocator_->Initialize();
- frame_->Enter();
- // Allocate space for locals and initialize them.
- frame_->AllocateStackSlots();
- // Initialize the function return target after the locals are set
- // up, because it needs the expected frame height from the frame.
- function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
- function_return_is_shadowed_ = false;
-
- // Allocate the local context if needed.
- int heap_slots = scope_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ allocate local context");
- // Allocate local context.
- // Get outer context and create a new context based on it.
- frame_->PushFunction();
- Result context;
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- context = frame_->CallStub(&stub, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kNewContext, 1);
- }
+ if (mode == PRIMARY) {
+ frame_->Enter();
+
+ // Allocate space for locals and initialize them.
+ frame_->AllocateStackSlots();
+
+ // Allocate the local context if needed.
+ int heap_slots = scope_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment cmnt(masm_, "[ allocate local context");
+ // Allocate local context.
+ // Get outer context and create a new context based on it.
+ frame_->PushFunction();
+ Result context;
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ context = frame_->CallStub(&stub, 1);
+ } else {
+ context = frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
- // Update context local.
- frame_->SaveContextRegister();
+ // Update context local.
+ frame_->SaveContextRegister();
- // Verify that the runtime call result and esi agree.
- if (FLAG_debug_code) {
- __ cmp(context.reg(), Operand(esi));
- __ Assert(equal, "Runtime::NewContext should end up in esi");
+ // Verify that the runtime call result and esi agree.
+ if (FLAG_debug_code) {
+ __ cmp(context.reg(), Operand(esi));
+ __ Assert(equal, "Runtime::NewContext should end up in esi");
+ }
}
- }
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
-
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope_->num_parameters(); i++) {
- Variable* par = scope_->parameter(i);
- Slot* slot = par->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- // The use of SlotOperand below is safe in unspilled code
- // because the slot is guaranteed to be a context slot.
- //
- // There are no parameters in the global scope.
- ASSERT(!scope_->is_global_scope());
- frame_->PushParameterAt(i);
- Result value = frame_->Pop();
- value.ToRegister();
-
- // SlotOperand loads context.reg() with the context object
- // stored to, used below in RecordWrite.
- Result context = allocator_->Allocate();
- ASSERT(context.is_valid());
- __ mov(SlotOperand(slot, context.reg()), value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- frame_->Spill(context.reg());
- frame_->Spill(value.reg());
- __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
+ // TODO(1241774): Improve this code:
+ // 1) only needed if we have a context
+ // 2) no need to recompute context ptr every single time
+ // 3) don't copy parameter operand code from SlotOperand!
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, it must be the last argument
+ // passed to the parameter that needs to be copied. This is a rare
+ // case so we don't check for it, instead we rely on the copying
+ // order: such a parameter is copied repeatedly into the same
+ // context location and thus the last value is what is seen inside
+ // the function.
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ Variable* par = scope_->parameter(i);
+ Slot* slot = par->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ // The use of SlotOperand below is safe in unspilled code
+ // because the slot is guaranteed to be a context slot.
+ //
+ // There are no parameters in the global scope.
+ ASSERT(!scope_->is_global_scope());
+ frame_->PushParameterAt(i);
+ Result value = frame_->Pop();
+ value.ToRegister();
+
+ // SlotOperand loads context.reg() with the context object
+ // stored to, used below in RecordWrite.
+ Result context = allocator_->Allocate();
+ ASSERT(context.is_valid());
+ __ mov(SlotOperand(slot, context.reg()), value.reg());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ frame_->Spill(context.reg());
+ frame_->Spill(value.reg());
+ __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
+ }
}
}
- }
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
+ // Store the arguments object. This must happen after context
+ // initialization because the arguments object may be stored in
+ // the context.
+ if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
+ StoreArgumentsObject(true);
+ }
- // Initialize ThisFunction reference if present.
- if (scope_->is_function_scope() && scope_->function() != NULL) {
- frame_->Push(Factory::the_hole_value());
- StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ // Initialize ThisFunction reference if present.
+ if (scope_->is_function_scope() && scope_->function() != NULL) {
+ frame_->Push(Factory::the_hole_value());
+ StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ }
+ } else {
+ // When used as the secondary compiler for splitting, ebp, esi,
+ // and edi have been pushed on the stack. Adjust the virtual
+ // frame to match this state.
+ frame_->Adjust(3);
+ allocator_->Unuse(edi);
}
+ // Initialize the function return target after the locals are set
+ // up, because it needs the expected frame height from the frame.
+ function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+ function_return_is_shadowed_ = false;
+
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
@@ -321,7 +333,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
}
// Adjust for function-level loop nesting.
- loop_nesting_ -= fun->loop_nesting();
+ loop_nesting_ -= info->loop_nesting();
// Code generation state must be reset.
ASSERT(state_ == NULL);
@@ -639,15 +651,22 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
return frame_->Pop();
}
+//------------------------------------------------------------------------------
+// CodeGenerator implementation of variables, lookups, and stores.
-Reference::Reference(CodeGenerator* cgen, Expression* expression)
- : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+Reference::Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get)
+ : cgen_(cgen),
+ expression_(expression),
+ type_(ILLEGAL),
+ persist_after_get_(persist_after_get) {
cgen->LoadReference(this);
}
Reference::~Reference() {
- cgen_->UnloadReference(this);
+ ASSERT(is_unloaded() || is_illegal());
}
@@ -697,6 +716,7 @@ void CodeGenerator::UnloadReference(Reference* ref) {
// Pop a reference from the stack while preserving TOS.
Comment cmnt(masm_, "[ UnloadReference");
frame_->Nip(ref->size());
+ ref->set_unloaded();
}
@@ -743,6 +763,12 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
class FloatingPointHelper : public AllStatic {
public:
+
+ enum ArgLocation {
+ ARGS_ON_STACK,
+ ARGS_IN_REGISTERS
+ };
+
// Code pattern for loading a floating point value. Input value must
// be either a smi or a heap number object (fp value). Requirements:
// operand in register number. Returns operand as floating point number
@@ -750,9 +776,16 @@ class FloatingPointHelper : public AllStatic {
static void LoadFloatOperand(MacroAssembler* masm, Register number);
// Code pattern for loading floating point values. Input values must
// be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
- // floating point numbers on FPU stack.
- static void LoadFloatOperands(MacroAssembler* masm, Register scratch);
+ // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
+ // Returns operands as floating point numbers on FPU stack.
+ static void LoadFloatOperands(MacroAssembler* masm,
+ Register scratch,
+ ArgLocation arg_location = ARGS_ON_STACK);
+
+  // Similar to LoadFloatOperands but assumes that both operands are smis.
+ // Expects operands in edx, eax.
+ static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
+
// Test if operands are smi or number objects (fp). Requirements:
// operand_1 in eax, operand_2 in edx; falls through on float
// operands, jumps to the non_float label otherwise.
@@ -768,7 +801,11 @@ class FloatingPointHelper : public AllStatic {
// them into xmm0 and xmm1 if they are. Jump to label not_numbers if
// either operand is not a number. Operands are in edx and eax.
// Leaves operands unchanged.
- static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
+ static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
+
+ // Similar to LoadSSE2Operands but assumes that both operands are smis.
+ // Expects operands in edx, eax.
+ static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
};
@@ -913,31 +950,6 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
return;
}
- // Set the flags based on the operation, type and loop nesting level.
- GenericBinaryFlags flags;
- switch (op) {
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- // Bit operations always assume they likely operate on Smis. Still only
- // generate the inline Smi check code if this operation is part of a loop.
- flags = (loop_nesting() > 0)
- ? NO_SMI_CODE_IN_STUB
- : NO_GENERIC_BINARY_FLAGS;
- break;
-
- default:
- // By default only inline the Smi check code for likely smis if this
- // operation is part of a loop.
- flags = ((loop_nesting() > 0) && type->IsLikelySmi())
- ? NO_SMI_CODE_IN_STUB
- : NO_GENERIC_BINARY_FLAGS;
- break;
- }
-
Result right = frame_->Pop();
Result left = frame_->Pop();
@@ -971,7 +983,6 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
- bool generate_no_smi_code = false; // No smi code at all, inline or in stub.
if (left_is_smi && right_is_smi) {
// Compute the constant result at compile time, and leave it on the frame.
@@ -980,33 +991,31 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
if (FoldConstantSmis(op, left_int, right_int)) return;
}
+ Result answer;
if (left_is_non_smi || right_is_non_smi) {
- // Set flag so that we go straight to the slow case, with no smi code.
- generate_no_smi_code = true;
+ // Go straight to the slow case, with no smi code.
+ GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
+ answer = stub.GenerateCall(masm_, frame_, &left, &right);
} else if (right_is_smi) {
- ConstantSmiBinaryOperation(op, &left, right.handle(),
- type, false, overwrite_mode);
- return;
+ answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
+ type, false, overwrite_mode);
} else if (left_is_smi) {
- ConstantSmiBinaryOperation(op, &right, left.handle(),
- type, true, overwrite_mode);
- return;
- }
-
- if (((flags & NO_SMI_CODE_IN_STUB) != 0) && !generate_no_smi_code) {
- LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+ answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
+ type, true, overwrite_mode);
} else {
- frame_->Push(&left);
- frame_->Push(&right);
- // If we know the arguments aren't smis, use the binary operation stub
- // that does not check for the fast smi case.
- if (generate_no_smi_code) {
- flags = NO_SMI_CODE_IN_STUB;
- }
- GenericBinaryOpStub stub(op, overwrite_mode, flags);
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
+ // Set the flags based on the operation, type and loop nesting level.
+ // Bit operations always assume they likely operate on Smis. Still only
+ // generate the inline Smi check code if this operation is part of a loop.
+ // For all other operations only inline the Smi check code for likely smis
+ // if the operation is part of a loop.
+ if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
+ answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+ } else {
+ GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
+ answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ }
}
+ frame_->Push(&answer);
}
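Editor's note: the rewrite above replaces the up-front GenericBinaryFlags computation with a single decision chain and exactly one frame_->Push(&answer) at the end. A condensed sketch of the order in which the paths are tried (illustrative only):

    // Most specific path first; note that in the patch bit operations
    // always count as likely-smi operations.
    static const char* PickPath(bool both_constant_smis,
                                bool some_constant_non_smi,
                                bool one_constant_smi,
                                bool in_loop,
                                bool likely_smi) {
      if (both_constant_smis) return "fold at compile time";
      if (some_constant_non_smi) return "stub with NO_SMI_CODE_IN_STUB";
      if (one_constant_smi) return "ConstantSmiBinaryOperation";
      if (in_loop && likely_smi) return "LikelySmiBinaryOperation";
      return "stub with NO_GENERIC_BINARY_FLAGS";
    }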
@@ -1093,10 +1102,11 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
// Implements a binary operation using a deferred code object and some
// inline code to operate on smis quickly.
-void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode) {
+Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode) {
+ Result answer;
// Special handling of div and mod because they use fixed registers.
if (op == Token::DIV || op == Token::MOD) {
// We need eax as the quotient register, edx as the remainder
@@ -1218,7 +1228,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&quotient);
+ answer = quotient;
} else {
ASSERT(op == Token::MOD);
// Check for a negative zero result. If the result is zero, and
@@ -1234,9 +1244,10 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&remainder);
+ answer = remainder;
}
- return;
+ ASSERT(answer.is_valid());
+ return answer;
}
// Special handling of shift operations because they use fixed
@@ -1257,7 +1268,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
frame_->Spill(ecx);
// Use a fresh answer register to avoid spilling the left operand.
- Result answer = allocator_->Allocate();
+ answer = allocator_->Allocate();
ASSERT(answer.is_valid());
// Check that both operands are smis using the answer register as a
// temporary.
@@ -1321,8 +1332,8 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&answer);
- return;
+ ASSERT(answer.is_valid());
+ return answer;
}
// Handle the other binary operations.
@@ -1331,7 +1342,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// A newly allocated register answer is used to hold the answer. The
// registers containing left and right are not modified so they don't
// need to be spilled in the fast case.
- Result answer = allocator_->Allocate();
+ answer = allocator_->Allocate();
ASSERT(answer.is_valid());
// Perform the smi tag check.
@@ -1353,12 +1364,12 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
__ mov(answer.reg(), left->reg());
switch (op) {
case Token::ADD:
- __ add(answer.reg(), Operand(right->reg())); // Add optimistically.
+ __ add(answer.reg(), Operand(right->reg()));
deferred->Branch(overflow);
break;
case Token::SUB:
- __ sub(answer.reg(), Operand(right->reg())); // Subtract optimistically.
+ __ sub(answer.reg(), Operand(right->reg()));
deferred->Branch(overflow);
break;
@@ -1406,7 +1417,8 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&answer);
+ ASSERT(answer.is_valid());
+ return answer;
}
@@ -1575,36 +1587,34 @@ void DeferredInlineSmiSub::Generate() {
}
-void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
- Result* operand,
- Handle<Object> value,
- StaticType* type,
- bool reversed,
- OverwriteMode overwrite_mode) {
+Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> value,
+ StaticType* type,
+ bool reversed,
+ OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
// of the operands is a constant smi.
// Consumes the argument "operand".
-
// TODO(199): Optimize some special cases of operations involving a
// smi literal (multiply by 2, shift by 0, etc.).
if (IsUnsafeSmi(value)) {
Result unsafe_operand(value);
if (reversed) {
- LikelySmiBinaryOperation(op, &unsafe_operand, operand,
- overwrite_mode);
+ return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+ overwrite_mode);
} else {
- LikelySmiBinaryOperation(op, operand, &unsafe_operand,
- overwrite_mode);
+ return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+ overwrite_mode);
}
- ASSERT(!operand->is_valid());
- return;
}
// Get the literal value.
Smi* smi_value = Smi::cast(*value);
int int_value = smi_value->value();
+ Result answer;
switch (op) {
case Token::ADD: {
operand->ToRegister();
@@ -1627,13 +1637,12 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break;
}
case Token::SUB: {
DeferredCode* deferred = NULL;
- Result answer; // Only allocate a new register if reversed.
if (reversed) {
// The reversed case is only hit when the right operand is not a
// constant.
@@ -1661,15 +1670,14 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
deferred->Branch(not_zero);
deferred->BindExit();
operand->Unuse();
- frame_->Push(&answer);
break;
}
case Token::SAR:
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -1689,21 +1697,21 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
__ and_(operand->reg(), ~kSmiTagMask);
}
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
}
break;
case Token::SHR:
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
int shift_value = int_value & 0x1f;
operand->ToRegister();
- Result answer = allocator()->Allocate();
+ answer = allocator()->Allocate();
ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
@@ -1724,7 +1732,6 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->Unuse();
__ SmiTag(answer.reg());
deferred->BindExit();
- frame_->Push(&answer);
}
break;
@@ -1749,7 +1756,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
}
operand->Unuse();
- Result answer = allocator()->Allocate();
+ answer = allocator()->Allocate();
DeferredInlineSmiOperationReversed* deferred =
new DeferredInlineSmiOperationReversed(op,
answer.reg(),
@@ -1765,7 +1772,6 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
__ SmiTag(answer.reg());
deferred->BindExit();
- frame_->Push(&answer);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -1783,10 +1789,10 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
} else {
// Use a fresh temporary for nonzero shift values.
- Result answer = allocator()->Allocate();
+ answer = allocator()->Allocate();
ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
@@ -1808,7 +1814,6 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
deferred->Branch(overflow);
deferred->BindExit();
operand->Unuse();
- frame_->Push(&answer);
}
}
break;
@@ -1847,7 +1852,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
}
}
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break;
}
@@ -1873,7 +1878,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
__ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
}
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break;
}
// Fall through if we did not find a power of 2 on the right hand side!
@@ -1881,16 +1886,17 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
default: {
Result constant_operand(value);
if (reversed) {
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
- LikelySmiBinaryOperation(op, operand, &constant_operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
+ overwrite_mode);
}
break;
}
}
- ASSERT(!operand->is_valid());
+ ASSERT(answer.is_valid());
+ return answer;
}
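Editor's note: ConstantSmiBinaryOperation and LikelySmiBinaryOperation now return a Result instead of pushing it, so the caller owns the single push. A tiny model of why centralizing the push helps (hypothetical Frame type):

    #include <vector>

    struct Frame {
      std::vector<int> elems;
      void Push(int v) { elems.push_back(v); }
    };

    // Before: every helper pushed for itself, so frame height had to be
    // reasoned about branch by branch.
    static void AddOld(Frame* f, int a, int b) { f->Push(a + b); }

    // After: helpers compute and return; the caller has exactly one push
    // site, which makes height checks such as the new
    // ASSERT(frame_->height() == original_height + 1) cheap to maintain.
    static int AddNew(int a, int b) { return a + b; }

    static void Caller(Frame* f) { f->Push(AddNew(1, 2)); }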
@@ -2289,6 +2295,7 @@ void CodeGenerator::Comparison(AstNode* node,
// Call the function just below TOS on the stack with the given
// arguments. The receiver is the TOS.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+ CallFunctionFlags flags,
int position) {
// Push the arguments ("left-to-right") on the stack.
int arg_count = args->length();
@@ -2301,7 +2308,7 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
// Use the shared code stub to call the function.
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
+ CallFunctionStub call_function(arg_count, in_loop, flags);
Result answer = frame_->CallStub(&call_function, arg_count + 1);
// Restore context and replace function on the stack with the
// result of the stub invocation.
@@ -2310,20 +2317,29 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
}
-void CodeGenerator::CallApplyLazy(Property* apply,
+void CodeGenerator::CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position) {
+ // An optimized implementation of expressions of the form
+ // x.apply(y, arguments).
+ // If the arguments object of the scope has not been allocated,
+ // and x.apply is Function.prototype.apply, this optimization
+  // just copies y and the arguments of the current function onto the
+ // stack, as receiver and arguments, and calls x.
+ // In the implementation comments, we call x the applicand
+ // and y the receiver.
ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
ASSERT(arguments->IsArguments());
- JumpTarget slow, done;
-
- // Load the apply function onto the stack. This will usually
+ // Load applicand.apply onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
- Reference ref(this, apply);
- ref.GetValue();
- ASSERT(ref.type() == Reference::NAMED);
+ Load(applicand);
+ Handle<String> name = Factory::LookupAsciiSymbol("apply");
+ frame()->Push(name);
+ Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
+ __ nop();
+ frame()->Push(&answer);
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
@@ -2333,6 +2349,11 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// Emit the source position information after having loaded the
// receiver and the arguments.
CodeForSourcePosition(position);
+ // Contents of frame at this point:
+ // Frame[0]: arguments object of the current function or the hole.
+ // Frame[1]: receiver
+ // Frame[2]: applicand.apply
+ // Frame[3]: applicand.
// Check if the arguments object has been lazily allocated
// already. If so, just use that instead of copying the arguments
@@ -2340,143 +2361,151 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// named 'arguments' has been introduced.
frame_->Dup();
Result probe = frame_->Pop();
- bool try_lazy = true;
- if (probe.is_constant()) {
- try_lazy = probe.handle()->IsTheHole();
- } else {
- __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
- probe.Unuse();
- slow.Branch(not_equal);
- }
-
- if (try_lazy) {
- JumpTarget build_args;
-
- // Get rid of the arguments object probe.
- frame_->Drop();
+ { VirtualFrame::SpilledScope spilled_scope;
+ Label slow, done;
+ bool try_lazy = true;
+ if (probe.is_constant()) {
+ try_lazy = probe.handle()->IsTheHole();
+ } else {
+ __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
+ probe.Unuse();
+ __ j(not_equal, &slow);
+ }
- // Before messing with the execution stack, we sync all
- // elements. This is bound to happen anyway because we're
- // about to call a function.
- frame_->SyncRange(0, frame_->element_count() - 1);
+ if (try_lazy) {
+ Label build_args;
+ // Get rid of the arguments object probe.
+ frame_->Drop(); // Can be called on a spilled frame.
+ // Stack now has 3 elements on it.
+ // Contents of stack at this point:
+ // esp[0]: receiver
+ // esp[1]: applicand.apply
+ // esp[2]: applicand.
- // Check that the receiver really is a JavaScript object.
- { frame_->PushElementAt(0);
- Result receiver = frame_->Pop();
- receiver.ToRegister();
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- build_args.Branch(zero);
- Result tmp = allocator_->Allocate();
+ // Check that the receiver really is a JavaScript object.
+ __ mov(eax, Operand(esp, 0));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &build_args);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, tmp.reg());
- build_args.Branch(less);
- }
-
- // Verify that we're invoking Function.prototype.apply.
- { frame_->PushElementAt(1);
- Result apply = frame_->Pop();
- apply.ToRegister();
- __ test(apply.reg(), Immediate(kSmiTagMask));
- build_args.Branch(zero);
- Result tmp = allocator_->Allocate();
- __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
- build_args.Branch(not_equal);
- __ mov(tmp.reg(),
- FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(below, &build_args);
+
+ // Check that applicand.apply is Function.prototype.apply.
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &build_args);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &build_args);
+ __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
+ __ cmp(FieldOperand(ecx, SharedFunctionInfo::kCodeOffset),
Immediate(apply_code));
- build_args.Branch(not_equal);
- }
-
- // Get the function receiver from the stack. Check that it
- // really is a function.
- __ mov(edi, Operand(esp, 2 * kPointerSize));
- __ test(edi, Immediate(kSmiTagMask));
- build_args.Branch(zero);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- build_args.Branch(not_equal);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adapted);
-
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ mov(eax, Immediate(scope_->num_parameters()));
- for (int i = 0; i < scope_->num_parameters(); i++) {
- __ push(frame_->ParameterAt(i));
- }
- __ jmp(&invoke);
-
- // Arguments adaptor frame present. Copy arguments from there, but
- // avoid copying too many arguments to avoid stack overflows.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(eax);
- __ mov(ecx, Operand(eax));
- __ cmp(eax, kArgumentsLimit);
- build_args.Branch(above);
-
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- __ bind(&loop);
- __ test(ecx, Operand(ecx));
- __ j(zero, &invoke);
- __ push(Operand(edx, ecx, times_4, 1 * kPointerSize));
- __ dec(ecx);
- __ jmp(&loop);
-
- // Invoke the function. The virtual frame knows about the receiver
- // so make sure to forget that explicitly.
- __ bind(&invoke);
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION);
- frame_->Forget(1);
- Result result = allocator()->Allocate(eax);
- frame_->SetElementAt(0, &result);
- done.Jump();
-
- // Slow-case: Allocate the arguments object since we know it isn't
- // there, and fall-through to the slow-case where we call
- // Function.prototype.apply.
- build_args.Bind();
- Result arguments_object = StoreArgumentsObject(false);
- frame_->Push(&arguments_object);
- slow.Bind();
- }
+ __ j(not_equal, &build_args);
+
+ // Check that applicand is a function.
+ __ mov(edi, Operand(esp, 2 * kPointerSize));
+ __ test(edi, Immediate(kSmiTagMask));
+ __ j(zero, &build_args);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &build_args);
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ Label invoke, adapted;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(ecx),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adapted);
+
+ // No arguments adaptor frame. Copy fixed number of arguments.
+ __ mov(eax, Immediate(scope_->num_parameters()));
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ __ push(frame_->ParameterAt(i));
+ }
+ __ jmp(&invoke);
- // Flip the apply function and the function to call on the stack, so
- // the function looks like the receiver of the apply call. This way,
- // the generic Function.prototype.apply implementation can deal with
- // the call like it usually does.
- Result a2 = frame_->Pop();
- Result a1 = frame_->Pop();
- Result ap = frame_->Pop();
- Result fn = frame_->Pop();
- frame_->Push(&ap);
- frame_->Push(&fn);
- frame_->Push(&a1);
- frame_->Push(&a2);
- CallFunctionStub call_function(2, NOT_IN_LOOP);
- Result res = frame_->CallStub(&call_function, 3);
- frame_->Push(&res);
-
- // All done. Restore context register after call.
- if (try_lazy) done.Bind();
+ // Arguments adaptor frame present. Copy arguments from there, but
+ // avoid copying too many arguments to avoid stack overflows.
+ __ bind(&adapted);
+ static const uint32_t kArgumentsLimit = 1 * KB;
+ __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(eax);
+ __ mov(ecx, Operand(eax));
+ __ cmp(eax, kArgumentsLimit);
+ __ j(above, &build_args);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack. We don't inform the virtual frame of the push, so we don't
+ // have to worry about getting rid of the elements from the virtual
+ // frame.
+ Label loop;
+ // ecx is a small non-negative integer, due to the test above.
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &invoke);
+ __ bind(&loop);
+ __ push(Operand(edx, ecx, times_pointer_size, 1 * kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &loop);
+
+ // Invoke the function.
+ __ bind(&invoke);
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION);
+ // Drop applicand.apply and applicand from the stack, and push
+ // the result of the function call, but leave the spilled frame
+ // unchanged, with 3 elements, so it is correct when we compile the
+ // slow-case code.
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ push(eax);
+ // Stack now has 1 element:
+ // esp[0]: result
+ __ jmp(&done);
+
+ // Slow-case: Allocate the arguments object since we know it isn't
+ // there, and fall-through to the slow-case where we call
+ // applicand.apply.
+ __ bind(&build_args);
+      // Stack now has 3 elements, because we have jumped here from a point where:
+ // esp[0]: receiver
+ // esp[1]: applicand.apply
+ // esp[2]: applicand.
+
+ // StoreArgumentsObject requires a correct frame, and may modify it.
+ Result arguments_object = StoreArgumentsObject(false);
+ frame_->SpillAll();
+ arguments_object.ToRegister();
+ frame_->EmitPush(arguments_object.reg());
+ arguments_object.Unuse();
+ // Stack and frame now have 4 elements.
+ __ bind(&slow);
+ }
+
+ // Generic computation of x.apply(y, args) with no special optimization.
+ // Flip applicand.apply and applicand on the stack, so
+ // applicand looks like the receiver of the applicand.apply call.
+ // Then process it as a normal function call.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ __ mov(Operand(esp, 2 * kPointerSize), eax);
+ __ mov(Operand(esp, 3 * kPointerSize), ebx);
+
+ CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
+ Result res = frame_->CallStub(&call_function, 3);
+ // The function and its two arguments have been dropped.
+ frame_->Drop(1); // Drop the receiver as well.
+ res.ToRegister();
+ frame_->EmitPush(res.reg());
+ // Stack now has 1 element:
+ // esp[0]: result
+ if (try_lazy) __ bind(&done);
+ } // End of spilled scope.
+ // Restore the context register after a call.
frame_->RestoreContextRegister();
}
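Editor's note: the generic fall-through above no longer pops and re-pushes four virtual-frame elements; it swaps the two stack slots directly so that applicand becomes the receiver of the applicand.apply call. A plain-C++ model of that two-load/two-store swap:

    // esp[i] and esp[j] modeled as plain array slots; the emitted code uses
    // eax and ebx as the two temporaries.
    static void SwapStackSlots(int* esp, int i, int j) {
      int a = esp[i];  // __ mov(eax, Operand(esp, i * kPointerSize))
      int b = esp[j];  // __ mov(ebx, Operand(esp, j * kPointerSize))
      esp[i] = b;
      esp[j] = a;
    }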
@@ -3516,17 +3545,13 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
if (!each.is_illegal()) {
if (each.size() > 0) {
frame_->EmitPush(frame_->ElementAt(each.size()));
- }
- // If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (eg, ebx pushed above) is
- // right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT);
- if (each.size() > 0) {
- // It's safe to pop the value lying on top of the reference before
- // unloading the reference itself (which preserves the top of stack,
- // ie, now the topmost value of the non-zero sized reference), since
- // we will discard the top of stack after unloading the reference
- // anyway.
+ each.SetValue(NOT_CONST_INIT);
+ frame_->Drop(2);
+ } else {
+ // If the reference was to a slot we rely on the convenient property
+ // that it doesn't matter whether a value (eg, ebx pushed above) is
+ // right on top of or right underneath a zero-sized reference.
+ each.SetValue(NOT_CONST_INIT);
frame_->Drop();
}
}
@@ -3534,10 +3559,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Unloading a reference may leave the frame in an unspilled state.
frame_->SpillAll();
- // Discard the i'th entry pushed above or else the remainder of the
- // reference, whichever is currently on top of the stack.
- frame_->Drop();
-
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
@@ -3892,7 +3913,9 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Spill everything, even constants, to the frame.
frame_->SpillAll();
- frame_->CallRuntime(Runtime::kDebugBreak, 0);
+
+ DebuggerStatementStub ces;
+ frame_->CallStub(&ces, 0);
// Ignore the return value.
#endif
}
@@ -4461,8 +4484,6 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Load(property->value());
frame_->Push(key);
Result ignored = frame_->CallStoreIC();
- // Drop the duplicated receiver and ignore the result.
- frame_->Drop();
break;
}
// Fall through
@@ -4587,9 +4608,12 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
void CodeGenerator::VisitAssignment(Assignment* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
Comment cmnt(masm_, "[ Assignment");
- { Reference target(this, node->target());
+ { Reference target(this, node->target(), node->is_compound());
if (target.is_illegal()) {
// Fool the virtual frame into thinking that we left the assignment's
// value on the frame.
@@ -4611,12 +4635,27 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
frame_->PushElementAt(target.size() - 1);
Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
}
+ if (node->ends_initialization_block()) {
+ // Add an extra copy of the receiver to the frame, so that it can be
+ // converted back to fast case after the assignment.
+ ASSERT(target.type() == Reference::NAMED ||
+ target.type() == Reference::KEYED);
+ if (target.type() == Reference::NAMED) {
+ frame_->Dup();
+ // Dup target receiver on stack.
+ } else {
+ ASSERT(target.type() == Reference::KEYED);
+ Result temp = frame_->Pop();
+ frame_->Dup();
+ frame_->Push(&temp);
+ }
+ }
if (node->op() == Token::ASSIGN ||
node->op() == Token::INIT_VAR ||
node->op() == Token::INIT_CONST) {
Load(node->value());
- } else {
+ } else { // Assignment is a compound assignment.
Literal* literal = node->value()->AsLiteral();
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
@@ -4642,6 +4681,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
var->mode() == Variable::CONST &&
node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
// Assignment ignored - leave the value on the stack.
+ UnloadReference(&target);
} else {
CodeForSourcePosition(node->position());
if (node->op() == Token::INIT_CONST) {
@@ -4653,17 +4693,20 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
target.SetValue(NOT_CONST_INIT);
}
if (node->ends_initialization_block()) {
- ASSERT(target.type() == Reference::NAMED ||
- target.type() == Reference::KEYED);
+ ASSERT(target.type() == Reference::UNLOADED);
// End of initialization block. Revert to fast case. The
- // argument to the runtime call is the receiver, which is the
- // first value pushed as part of the reference, which is below
- // the lhs value.
- frame_->PushElementAt(target.size());
+ // argument to the runtime call is the extra copy of the receiver,
+ // which is below the value of the assignment.
+ // Swap the receiver and the value of the assignment expression.
+ Result lhs = frame_->Pop();
+ Result receiver = frame_->Pop();
+ frame_->Push(&lhs);
+ frame_->Push(&receiver);
Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
}
}
}
+ ASSERT(frame_->height() == original_height + 1);
}
@@ -4746,7 +4789,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Call the function.
CodeForSourcePosition(node->position());
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
+ CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
result = frame_->CallStub(&call_function, arg_count + 1);
// Restore the context and overwrite the function on the stack with
@@ -4806,7 +4849,7 @@ void CodeGenerator::VisitCall(Call* node) {
frame_->EmitPush(edx);
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
} else if (property != NULL) {
// Check if the key is a literal string.
@@ -4826,7 +4869,7 @@ void CodeGenerator::VisitCall(Call* node) {
args->at(1)->AsVariableProxy()->IsArguments()) {
// Use the optimized Function.prototype.apply that avoids
// allocating lazily allocated arguments objects.
- CallApplyLazy(property,
+ CallApplyLazy(property->obj(),
args->at(0),
args->at(1)->AsVariableProxy(),
node->position());
@@ -4859,20 +4902,25 @@ void CodeGenerator::VisitCall(Call* node) {
// -------------------------------------------
// Load the function to call from the property through a reference.
- Reference ref(this, property);
- ref.GetValue();
// Pass receiver to called function.
if (property->is_synthetic()) {
+ Reference ref(this, property);
+ ref.GetValue();
// Use global object as receiver.
LoadGlobalReceiver();
} else {
- // The reference's size is non-negative.
- frame_->PushElementAt(ref.size());
+ Load(property->obj());
+ Load(property->key());
+ Result function = EmitKeyedLoad(false);
+ frame_->Drop(); // Key.
+ Result receiver = frame_->Pop();
+ frame_->Push(&function);
+ frame_->Push(&receiver);
}
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
}
} else {
@@ -4887,7 +4935,7 @@ void CodeGenerator::VisitCall(Call* node) {
LoadGlobalReceiver();
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
}
}
@@ -5098,7 +5146,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
// flat string in a cons string). If that is not the case we would rather go
// to the runtime system now, to flatten the string.
__ mov(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
- __ cmp(Operand(temp.reg()), Immediate(Handle<String>(Heap::empty_string())));
+ __ cmp(Operand(temp.reg()), Factory::empty_string());
__ j(not_equal, &slow_case);
// Get the first of the two strings.
__ mov(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
@@ -5182,6 +5230,26 @@ void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ __ test(obj.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(),
+ FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ __ movzx_b(temp.reg(),
+ FieldOperand(temp.reg(), Map::kBitFieldOffset));
+ __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+ obj.Unuse();
+ temp.Unuse();
+ destination()->Split(not_zero);
+}
+
+
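
The two tests above, the smi tag check and the map bit-field test, look like this in plain C++ (constants and object layout are illustrative stand-ins, not V8's actual definitions):

    #include <cstdint>

    // Illustrative constants only; V8's real values live in its headers.
    const intptr_t kSmiTagMask = 1;   // low bit clear => smi (assumed layout)
    const intptr_t kHeapObjectTag = 1;
    const int kIsUndetectable = 4;    // hypothetical bit position

    struct Map { uint8_t bit_field; };
    struct HeapObject { Map* map; };

    bool IsUndetectableObject(intptr_t value) {
      if ((value & kSmiTagMask) == 0) return false;  // smis branch to false_target
      HeapObject* obj =
          reinterpret_cast<HeapObject*>(value - kHeapObjectTag);  // FieldOperand
      return (obj->map->bit_field & (1 << kIsUndetectable)) != 0; // Split(not_zero)
    }

    int main() {
      Map map = {1 << kIsUndetectable};
      HeapObject heap_obj = {&map};
      intptr_t tagged = reinterpret_cast<intptr_t>(&heap_obj) + kHeapObjectTag;
      return IsUndetectableObject(tagged) ? 0 : 1;
    }
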
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
@@ -5759,7 +5827,9 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
// value will be in the frame to be spilled.
if (is_postfix) frame_->Push(Smi::FromInt(0));
- { Reference target(this, node->expression());
+  // A constant reference is not stored to, so it is not treated as a
+  // compound assignment reference.
+ { Reference target(this, node->expression(), !is_const);
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
@@ -6362,6 +6432,114 @@ void DeferredReferenceSetKeyedValue::Generate() {
}
+Result CodeGenerator::EmitKeyedLoad(bool is_global) {
+ Comment cmnt(masm_, "[ Load from keyed Property");
+ // Inline array load code if inside of a loop. We do not know
+ // the receiver map yet, so we initially generate the code with
+ // a check against an invalid map. In the inline cache code, we
+ // patch the map check if appropriate.
+ if (loop_nesting() > 0) {
+ Comment cmnt(masm_, "[ Inlined load from keyed Property");
+
+ Result key = frame_->Pop();
+ Result receiver = frame_->Pop();
+ key.ToRegister();
+ receiver.ToRegister();
+
+ // Use a fresh temporary to load the elements without destroying
+ // the receiver which is needed for the deferred slow case.
+ Result elements = allocator()->Allocate();
+ ASSERT(elements.is_valid());
+
+ // Use a fresh temporary for the index and later the loaded
+ // value.
+ Result index = allocator()->Allocate();
+ ASSERT(index.is_valid());
+
+ DeferredReferenceGetKeyedValue* deferred =
+ new DeferredReferenceGetKeyedValue(index.reg(),
+ receiver.reg(),
+ key.reg(),
+ is_global);
+
+ // Check that the receiver is not a smi (only needed if this
+ // is not a load from the global context) and that it has the
+ // expected map.
+ if (!is_global) {
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
+ }
+
+ // Initially, use an invalid map. The map is patched in the IC
+ // initialization code.
+ __ bind(deferred->patch_site());
+ // Use masm-> here instead of the double underscore macro since extra
+ // coverage code can interfere with the patching.
+ masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ Immediate(Factory::null_value()));
+ deferred->Branch(not_equal);
+
+ // Check that the key is a smi.
+ __ test(key.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+
+ // Get the elements array from the receiver and check that it
+ // is not a dictionary.
+ __ mov(elements.reg(),
+ FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+ Immediate(Factory::fixed_array_map()));
+ deferred->Branch(not_equal);
+
+ // Shift the key to get the actual index value and check that
+ // it is within bounds.
+ __ mov(index.reg(), key.reg());
+ __ SmiUntag(index.reg());
+ __ cmp(index.reg(),
+ FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+ deferred->Branch(above_equal);
+
+ // Load and check that the result is not the hole. We could
+ // reuse the index or elements register for the value.
+ //
+ // TODO(206): Consider whether it makes sense to try some
+ // heuristic about which register to reuse. For example, if
+  // one is eax, then we can reuse that one because the value
+ // coming from the deferred code will be in eax.
+ Result value = index;
+ __ mov(value.reg(), Operand(elements.reg(),
+ index.reg(),
+ times_4,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ elements.Unuse();
+ index.Unuse();
+ __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
+ deferred->Branch(equal);
+ __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+ deferred->BindExit();
+ // Restore the receiver and key to the frame and push the
+ // result on top of it.
+ frame_->Push(&receiver);
+ frame_->Push(&key);
+ return value;
+ } else {
+ Comment cmnt(masm_, "[ Load from keyed Property");
+ RelocInfo::Mode mode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ Result answer = frame_->CallKeyedLoadIC(mode);
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed load. The explicit nop instruction is here because
+ // the push that follows might be peep-hole optimized away.
+ __ nop();
+ return answer;
+ }
+}
+
+
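
Stripped of register allocation and deferred-code plumbing, the inlined fast path implements the following logic (a C++ sketch; FastKeyedLoad and kTheHole are illustrative names, not V8 API):

    #include <cstdint>
    #include <vector>

    const intptr_t kSmiTagMask = 1;   // assumed 1-bit smi tag, tag value 0
    const intptr_t kTheHole = -1;     // stand-in for Factory::the_hole_value()

    // Fast path only; returning false means "take the deferred/IC slow case"
    // (non-smi key, wrong map, dictionary elements, out of bounds, or a hole).
    bool FastKeyedLoad(const std::vector<intptr_t>& elements,
                       intptr_t key, intptr_t* value) {
      if ((key & kSmiTagMask) != 0) return false;      // key must be a smi
      size_t index = static_cast<size_t>(key >> 1);    // SmiUntag
      if (index >= elements.size()) return false;      // bounds check
      if (elements[index] == kTheHole) return false;   // holes go slow
      *value = elements[index];
      return true;
    }

    int main() {
      std::vector<intptr_t> elements = {2, 4, kTheHole};  // smi-tagged 1, 2, hole
      intptr_t value = 0;
      return FastKeyedLoad(elements, 1 << 1, &value) ? 0 : 1;  // loads index 1
    }
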
#undef __
#define __ ACCESS_MASM(masm)
@@ -6474,121 +6652,21 @@ void Reference::GetValue() {
}
case KEYED: {
- Comment cmnt(masm, "[ Load from keyed Property");
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
-
- // Inline array load code if inside of a loop. We do not know
- // the receiver map yet, so we initially generate the code with
- // a check against an invalid map. In the inline cache code, we
- // patch the map check if appropriate.
- if (cgen_->loop_nesting() > 0) {
- Comment cmnt(masm, "[ Inlined load from keyed Property");
-
- Result key = cgen_->frame()->Pop();
- Result receiver = cgen_->frame()->Pop();
- key.ToRegister();
- receiver.ToRegister();
-
- // Use a fresh temporary to load the elements without destroying
- // the receiver which is needed for the deferred slow case.
- Result elements = cgen_->allocator()->Allocate();
- ASSERT(elements.is_valid());
-
- // Use a fresh temporary for the index and later the loaded
- // value.
- Result index = cgen_->allocator()->Allocate();
- ASSERT(index.is_valid());
-
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(index.reg(),
- receiver.reg(),
- key.reg(),
- is_global);
-
- // Check that the receiver is not a smi (only needed if this
- // is not a load from the global context) and that it has the
- // expected map.
- if (!is_global) {
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
- }
-
- // Initially, use an invalid map. The map is patched in the IC
- // initialization code.
- __ bind(deferred->patch_site());
- // Use masm-> here instead of the double underscore macro since extra
- // coverage code can interfere with the patching.
- masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(Factory::null_value()));
- deferred->Branch(not_equal);
-
- // Check that the key is a smi.
- __ test(key.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
-
- // Get the elements array from the receiver and check that it
- // is not a dictionary.
- __ mov(elements.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- deferred->Branch(not_equal);
-
- // Shift the key to get the actual index value and check that
- // it is within bounds.
- __ mov(index.reg(), key.reg());
- __ SmiUntag(index.reg());
- __ cmp(index.reg(),
- FieldOperand(elements.reg(), FixedArray::kLengthOffset));
- deferred->Branch(above_equal);
-
- // Load and check that the result is not the hole. We could
- // reuse the index or elements register for the value.
- //
- // TODO(206): Consider whether it makes sense to try some
- // heuristic about which register to reuse. For example, if
- // one is eax, the we can reuse that one because the value
- // coming from the deferred code will be in eax.
- Result value = index;
- __ mov(value.reg(), Operand(elements.reg(),
- index.reg(),
- times_4,
- FixedArray::kHeaderSize - kHeapObjectTag));
- elements.Unuse();
- index.Unuse();
- __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
- deferred->Branch(equal);
- __ IncrementCounter(&Counters::keyed_load_inline, 1);
-
- deferred->BindExit();
- // Restore the receiver and key to the frame and push the
- // result on top of it.
- cgen_->frame()->Push(&receiver);
- cgen_->frame()->Push(&key);
- cgen_->frame()->Push(&value);
-
- } else {
- Comment cmnt(masm, "[ Load from keyed Property");
- RelocInfo::Mode mode = is_global
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed load. The explicit nop instruction is here because
- // the push that follows might be peep-hole optimized away.
- __ nop();
- cgen_->frame()->Push(&answer);
- }
+ Result value = cgen_->EmitKeyedLoad(is_global);
+ cgen_->frame()->Push(&value);
break;
}
default:
UNREACHABLE();
}
+
+ if (!persist_after_get_) {
+ cgen_->UnloadReference(this);
+ }
}
@@ -6622,6 +6700,9 @@ void Reference::TakeValue() {
ASSERT(slot->type() == Slot::LOCAL);
cgen_->frame()->TakeLocalAt(slot->index());
}
+
+ ASSERT(persist_after_get_);
+ // Do not unload the reference, because it is used in SetValue.
}
@@ -6635,6 +6716,7 @@ void Reference::SetValue(InitState init_state) {
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
cgen_->StoreToSlot(slot, init_state);
+ cgen_->UnloadReference(this);
break;
}
@@ -6643,6 +6725,7 @@ void Reference::SetValue(InitState init_state) {
cgen_->frame()->Push(GetName());
Result answer = cgen_->frame()->CallStoreIC();
cgen_->frame()->Push(&answer);
+ set_unloaded();
break;
}
@@ -6745,6 +6828,7 @@ void Reference::SetValue(InitState init_state) {
__ nop();
cgen_->frame()->Push(&answer);
}
+ cgen_->UnloadReference(this);
break;
}
@@ -7061,143 +7145,335 @@ void GenericBinaryOpStub::GenerateCall(
}
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // Perform fast-case smi code for the operation (eax <op> ebx) and
- // leave result in register eax.
+Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
+ VirtualFrame* frame,
+ Result* left,
+ Result* right) {
+ if (ArgsInRegistersSupported()) {
+ SetArgsInRegisters();
+ return frame->CallStub(this, left, right);
+ } else {
+ frame->Push(left);
+ frame->Push(right);
+ return frame->CallStub(this, 2);
+ }
+}
- // Prepare the smi check of both operands by or'ing them together
- // before checking against the smi mask.
- __ mov(ecx, Operand(ebx));
- __ or_(ecx, Operand(eax));
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
+ // 1. Move arguments into edx, eax except for DIV and MOD, which need the
+ // dividend in eax and edx free for the division. Use eax, ebx for those.
+ Comment load_comment(masm, "-- Load arguments");
+ Register left = edx;
+ Register right = eax;
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ left = eax;
+ right = ebx;
+ if (HasArgsInRegisters()) {
+ __ mov(ebx, eax);
+ __ mov(eax, edx);
+ }
+ }
+ if (!HasArgsInRegisters()) {
+ __ mov(right, Operand(esp, 1 * kPointerSize));
+ __ mov(left, Operand(esp, 2 * kPointerSize));
+ }
+
+  // 2. Prepare the smi check of both operands by or'ing them together.
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ Label not_smis;
+ Register combined = ecx;
+ ASSERT(!left.is(combined) && !right.is(combined));
switch (op_) {
- case Token::ADD:
- __ add(eax, Operand(ebx)); // add optimistically
- __ j(overflow, slow, not_taken);
+ case Token::BIT_OR:
+ // Perform the operation into eax and smi check the result. Preserve
+ // eax in case the result is not a smi.
+ ASSERT(!left.is(ecx) && !right.is(ecx));
+ __ mov(ecx, right);
+ __ or_(right, Operand(left)); // Bitwise or is commutative.
+ combined = right;
break;
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::ADD:
case Token::SUB:
- __ sub(eax, Operand(ebx)); // subtract optimistically
- __ j(overflow, slow, not_taken);
- break;
-
+ case Token::MUL:
case Token::DIV:
case Token::MOD:
- // Sign extend eax into edx:eax.
- __ cdq();
- // Check for 0 divisor.
- __ test(ebx, Operand(ebx));
- __ j(zero, slow, not_taken);
+ __ mov(combined, right);
+ __ or_(combined, Operand(left));
+ break;
+
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ // Move the right operand into ecx for the shift operation, use eax
+ // for the smi check register.
+ ASSERT(!left.is(ecx) && !right.is(ecx));
+ __ mov(ecx, right);
+ __ or_(right, Operand(left));
+ combined = right;
break;
default:
- // Fall-through to smi check.
break;
}
- // Perform the actual smi check.
- ASSERT(kSmiTag == 0); // adjust zero check if not the case
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, slow, not_taken);
+ // 3. Perform the smi check of the operands.
+ ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
+ __ test(combined, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smis, not_taken);
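
Steps 2 and 3 exploit the zero smi tag: or'ing the operands merges their tag bits, so a single test covers both. A sketch:

    #include <cstdint>

    const intptr_t kSmiTag = 0;      // the ASSERT(kSmiTag == 0) relies on this
    const intptr_t kSmiTagMask = 1;

    // With a zero tag, or'ing the operands leaves the tag bit set iff at
    // least one operand is a heap object, so one test covers both.
    bool BothSmis(intptr_t a, intptr_t b) {
      return ((a | b) & kSmiTagMask) == kSmiTag;
    }

    int main() { return BothSmis(2, 4) && !BothSmis(2, 5) ? 0 : 1; }
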
+ // 4. Operands are both smis, perform the operation leaving the result in
+ // eax and check the result if necessary.
+ Comment perform_smi(masm, "-- Perform smi operation");
+ Label use_fp_on_smis;
switch (op_) {
+ case Token::BIT_OR:
+ // Nothing to do.
+ break;
+
+ case Token::BIT_XOR:
+ ASSERT(right.is(eax));
+ __ xor_(right, Operand(left)); // Bitwise xor is commutative.
+ break;
+
+ case Token::BIT_AND:
+ ASSERT(right.is(eax));
+ __ and_(right, Operand(left)); // Bitwise and is commutative.
+ break;
+
+ case Token::SHL:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ shl_cl(left);
+ // Check that the *signed* result fits in a smi.
+ __ cmp(left, 0xc0000000);
+ __ j(sign, &use_fp_on_smis, not_taken);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::SAR:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ sar_cl(left);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::SHR:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ shr_cl(left);
+ // Check that the *unsigned* result fits in a smi.
+ // Neither of the two high-order bits can be set:
+ // - 0x80000000: high bit would be lost when smi tagging.
+ // - 0x40000000: this number would convert to negative when
+ // Smi tagging these two cases can only happen with shifts
+ // by 0 or 1 when handed a valid smi.
+ __ test(left, Immediate(0xc0000000));
+ __ j(not_zero, slow, not_taken);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
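
The 0xc0000000 test encodes "fits in the 31-bit smi payload"; in C++ terms:

    #include <cstdint>

    // The untagged result can be re-tagged as a smi only if the top two bits
    // are clear: bit 31 would be lost by the tag shift, and bit 30 would make
    // the tagged value negative. (Assumes the 1-bit smi tag used above.)
    bool UnsignedFitsInSmi(uint32_t shifted) {
      return (shifted & 0xc0000000u) == 0;
    }

    int main() {
      return UnsignedFitsInSmi(0x3fffffffu) &&
             !UnsignedFitsInSmi(0x40000000u) ? 0 : 1;
    }
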
+
case Token::ADD:
+ ASSERT(right.is(eax));
+ __ add(right, Operand(left)); // Addition is commutative.
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ break;
+
case Token::SUB:
- // Do nothing here.
+ __ sub(left, Operand(right));
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ __ mov(eax, left);
break;
case Token::MUL:
// If the smi tag is 0 we can just leave the tag on one operand.
- ASSERT(kSmiTag == 0); // adjust code below if not the case
+ ASSERT(kSmiTag == 0); // Adjust code below if not the case.
+ // We can't revert the multiplication if the result is not a smi
+ // so save the right operand.
+ __ mov(ebx, right);
// Remove tag from one of the operands (but keep sign).
- __ SmiUntag(eax);
+ __ SmiUntag(right);
// Do multiplication.
- __ imul(eax, Operand(ebx)); // multiplication of smis; result in eax
- // Go slow on overflows.
- __ j(overflow, slow, not_taken);
- // Check for negative zero result.
- __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y
+ __ imul(right, Operand(left)); // Multiplication is commutative.
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(right, combined, &use_fp_on_smis);
break;
case Token::DIV:
- // Divide edx:eax by ebx.
- __ idiv(ebx);
- // Check for the corner case of dividing the most negative smi
- // by -1. We cannot use the overflow flag, since it is not set
- // by idiv instruction.
+ // We can't revert the division if the result is not a smi so
+ // save the left operand.
+ __ mov(edi, left);
+ // Check for 0 divisor.
+ __ test(right, Operand(right));
+ __ j(zero, &use_fp_on_smis, not_taken);
+ // Sign extend left into edx:eax.
+ ASSERT(left.is(eax));
+ __ cdq();
+ // Divide edx:eax by right.
+ __ idiv(right);
+ // Check for the corner case of dividing the most negative smi by
+ // -1. We cannot use the overflow flag, since it is not set by idiv
+ // instruction.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ cmp(eax, 0x40000000);
- __ j(equal, slow);
- // Check for negative zero result.
- __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y
+ __ j(equal, &use_fp_on_smis);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
// Check that the remainder is zero.
__ test(edx, Operand(edx));
- __ j(not_zero, slow);
+ __ j(not_zero, &use_fp_on_smis);
// Tag the result and store it in register eax.
__ SmiTag(eax);
break;
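
The division guards above amount to four bail-out conditions; a C++ sketch (SmiDivide is an illustrative name; int64 arithmetic sidesteps the INT_MIN/-1 trap that idiv reports directly):

    #include <cstdint>

    // Returns false whenever the stub jumps to use_fp_on_smis: zero divisor,
    // the one quotient that overflows the smi range, a -0 result, or a
    // nonzero remainder.
    bool SmiDivide(int32_t left, int32_t right, int32_t* result) {
      if (right == 0) return false;                           // 0 divisor
      int64_t q = static_cast<int64_t>(left) / right;
      if (q == 0x40000000) return false;       // most negative smi / -1
      if (q == 0 && (left < 0 || right < 0)) return false;    // -0 result
      if (static_cast<int64_t>(left) % right != 0) return false;  // remainder
      *result = static_cast<int32_t>(q);                      // still untagged
      return true;
    }

    int main() {
      int32_t r = 0;
      return SmiDivide(-6, 3, &r) && r == -2 ? 0 : 1;
    }
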
case Token::MOD:
- // Divide edx:eax by ebx.
- __ idiv(ebx);
- // Check for negative zero result.
- __ NegativeZeroTest(edx, ecx, slow); // use ecx = x | y
+ // Check for 0 divisor.
+ __ test(right, Operand(right));
+ __ j(zero, &not_smis, not_taken);
+
+ // Sign extend left into edx:eax.
+ ASSERT(left.is(eax));
+ __ cdq();
+ // Divide edx:eax by right.
+ __ idiv(right);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(edx, combined, slow);
// Move remainder to register eax.
- __ mov(eax, Operand(edx));
+ __ mov(eax, edx);
break;
- case Token::BIT_OR:
- __ or_(eax, Operand(ebx));
- break;
+ default:
+ UNREACHABLE();
+ }
- case Token::BIT_AND:
- __ and_(eax, Operand(ebx));
- break;
+ // 5. Emit return of result in eax.
+ GenerateReturn(masm);
- case Token::BIT_XOR:
- __ xor_(eax, Operand(ebx));
+ // 6. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ switch (op_) {
+ case Token::SHL: {
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ __ bind(&use_fp_on_smis);
+ // Result we want is in left == edx, so we can put the allocated heap
+ // number in eax.
+ __ AllocateHeapNumber(eax, ecx, ebx, slow);
+ // Store the result in the HeapNumber and return.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(left));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ // It's OK to overwrite the right argument on the stack because we
+ // are about to return.
+ __ mov(Operand(esp, 1 * kPointerSize), left);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ GenerateReturn(masm);
break;
+ }
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- // Move the second operand into register ecx.
- __ mov(ecx, Operand(ebx));
- // Remove tags from operands (but keep sign).
- __ SmiUntag(eax);
- __ SmiUntag(ecx);
- // Perform the operation.
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ __ bind(&use_fp_on_smis);
+ // Restore arguments to edx, eax.
switch (op_) {
- case Token::SAR:
- __ sar_cl(eax);
- // No checks of result necessary
+ case Token::ADD:
+ // Revert right = right + left.
+ __ sub(right, Operand(left));
break;
- case Token::SHR:
- __ shr_cl(eax);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
- // Smi tagging these two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, slow, not_taken);
+ case Token::SUB:
+ // Revert left = left - right.
+ __ add(left, Operand(right));
break;
- case Token::SHL:
- __ shl_cl(eax);
- // Check that the *signed* result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(sign, slow, not_taken);
+ case Token::MUL:
+ // Right was clobbered but a copy is in ebx.
+ __ mov(right, ebx);
+ break;
+ case Token::DIV:
+ // Left was clobbered but a copy is in edi. Right is in ebx for
+ // division.
+ __ mov(edx, edi);
+ __ mov(eax, right);
+ break;
+ default: UNREACHABLE();
break;
- default:
- UNREACHABLE();
}
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
+ __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ FloatingPointHelper::LoadSSE2Smis(masm, ebx);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+ } else { // SSE2 not available, use FPU.
+ FloatingPointHelper::LoadFloatSmis(masm, ebx);
+ switch (op_) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
+ }
+ __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+ }
+ __ mov(eax, ecx);
+ GenerateReturn(masm);
+ break;
+ }
+
+ default:
+ break;
+ }
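
The revert steps work because the optimistic operations are exactly invertible in two's-complement arithmetic; for example, for addition:

    #include <cstdint>

    // After an optimistic 'right += left' overflows, the original right
    // operand is recovered by subtracting left again: wraparound makes the
    // round trip exact. (uint32_t avoids signed-overflow UB in C++.)
    uint32_t RevertAdd(uint32_t overflowed_sum, uint32_t left) {
      return overflowed_sum - left;  // == the original right operand
    }

    int main() {
      uint32_t left = 0x60000000u, right = 0x50000000u;  // sum overflows int32
      return RevertAdd(left + right, left) == right ? 0 : 1;
    }
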
+
+ // 7. Non-smi operands, fall out to the non-smi code with the operands in
+ // edx and eax.
+ Comment done_comment(masm, "-- Enter non-smi code");
+ __ bind(&not_smis);
+ switch (op_) {
+ case Token::BIT_OR:
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ // Right operand is saved in ecx and eax was destroyed by the smi
+ // check.
+ __ mov(eax, ecx);
+ break;
+
+ case Token::DIV:
+ case Token::MOD:
+ // Operands are in eax, ebx at this point.
+ __ mov(edx, eax);
+ __ mov(eax, ebx);
break;
default:
- UNREACHABLE();
break;
}
}
@@ -7212,30 +7488,20 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// case smi code is not generated by the caller. Generating it here will speed
// up common operations.
if (HasSmiCodeInStub()) {
- Label slow;
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- GenerateSmiCode(masm, &slow);
- GenerateReturn(masm);
- // Too bad. The fast case smi code didn't succeed.
- __ bind(&slow);
+ GenerateSmiCode(masm, &call_runtime);
+ } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
+ GenerateLoadArguments(masm);
}
- // Make sure the arguments are in edx and eax.
- GenerateLoadArguments(masm);
-
// Floating point case.
switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV: {
- // eax: y
- // edx: x
-
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSse2Operands(masm, &call_runtime);
+ FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
@@ -7244,59 +7510,15 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- // Allocate a heap number, if needed.
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- __ mov(eax, Operand(edx));
- // Fall through!
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE: {
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- }
- default: UNREACHABLE();
- }
+ GenerateHeapResultAllocation(masm, &call_runtime);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
GenerateReturn(masm);
} else { // SSE2 not available, use FPU.
FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
- // Allocate a heap number, if needed.
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- __ mov(eax, Operand(edx));
- // Fall through!
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- FloatingPointHelper::LoadFloatOperands(masm, ecx);
-
+ FloatingPointHelper::LoadFloatOperands(
+ masm,
+ ecx,
+ FloatingPointHelper::ARGS_IN_REGISTERS);
switch (op_) {
case Token::ADD: __ faddp(1); break;
case Token::SUB: __ fsubp(1); break;
@@ -7304,8 +7526,13 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::DIV: __ fdivp(1); break;
default: UNREACHABLE();
}
+ Label after_alloc_failure;
+ GenerateHeapResultAllocation(masm, &after_alloc_failure);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
GenerateReturn(masm);
+ __ bind(&after_alloc_failure);
+ __ ffree();
+ __ jmp(&call_runtime);
}
}
case Token::MOD: {
@@ -7318,12 +7545,8 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::SAR:
case Token::SHL:
case Token::SHR: {
- Label non_smi_result, skip_allocation;
- Label operand_conversion_failure;
- FloatingPointHelper::LoadAsIntegers(
- masm,
- use_sse3_,
- &operand_conversion_failure);
+ Label non_smi_result;
+ FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
switch (op_) {
case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
@@ -7336,7 +7559,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
if (op_ == Token::SHR) {
// Check if result is non-negative and fits in a smi.
__ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &non_smi_result);
+ __ j(not_zero, &call_runtime);
} else {
// Check if result fits in a smi.
__ cmp(eax, 0xc0000000);
@@ -7351,6 +7574,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
__ mov(ebx, Operand(eax)); // ebx: result
+ Label skip_allocation;
switch (mode_) {
case OVERWRITE_LEFT:
case OVERWRITE_RIGHT:
@@ -7379,15 +7603,6 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
GenerateReturn(masm);
}
-
- // Go to runtime for non-number inputs.
- __ bind(&operand_conversion_failure);
- // SHR should return uint32 - go to runtime for non-smi/negative result.
- if (op_ == Token::SHR) {
- __ bind(&non_smi_result);
- }
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 2 * kPointerSize));
break;
}
default: UNREACHABLE(); break;
@@ -7397,9 +7612,9 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// result. If arguments was passed in registers now place them on the
// stack in the correct order below the return address.
__ bind(&call_runtime);
- if (HasArgumentsInRegisters()) {
+ if (HasArgsInRegisters()) {
__ pop(ecx);
- if (HasArgumentsReversed()) {
+ if (HasArgsReversed()) {
__ push(eax);
__ push(edx);
} else {
@@ -7413,17 +7628,15 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// Test for string arguments before calling runtime.
Label not_strings, not_string1, string1;
Result answer;
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
- __ test(eax, Immediate(kSmiTagMask));
+ __ test(edx, Immediate(kSmiTagMask));
__ j(zero, &not_string1);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, eax);
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &not_string1);
- // First argument is a a string, test second.
- __ test(edx, Immediate(kSmiTagMask));
+ // First argument is a string, test second.
+ __ test(eax, Immediate(kSmiTagMask));
__ j(zero, &string1);
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &string1);
// First and second argument are strings. Jump to the string add stub.
@@ -7432,17 +7645,25 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// Only first argument is a string.
__ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+ __ InvokeBuiltin(
+ HasArgsReversed() ?
+ Builtins::STRING_ADD_RIGHT :
+ Builtins::STRING_ADD_LEFT,
+ JUMP_FUNCTION);
// First argument was not a string, test second.
__ bind(&not_string1);
- __ test(edx, Immediate(kSmiTagMask));
+ __ test(eax, Immediate(kSmiTagMask));
__ j(zero, &not_strings);
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &not_strings);
// Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+ __ InvokeBuiltin(
+ HasArgsReversed() ?
+ Builtins::STRING_ADD_LEFT :
+ Builtins::STRING_ADD_RIGHT,
+ JUMP_FUNCTION);
__ bind(&not_strings);
// Neither argument is a string.
@@ -7454,7 +7675,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
break;
case Token::MUL:
__ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
+ break;
case Token::DIV:
__ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
@@ -7485,9 +7706,57 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
+void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure) {
+ Label skip_allocation;
+ OverwriteMode mode = mode_;
+ if (HasArgsReversed()) {
+ if (mode == OVERWRITE_RIGHT) {
+ mode = OVERWRITE_LEFT;
+ } else if (mode == OVERWRITE_LEFT) {
+ mode = OVERWRITE_RIGHT;
+ }
+ }
+ switch (mode) {
+ case OVERWRITE_LEFT: {
+ // If the argument in edx is already an object, we skip the
+ // allocation of a heap number.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+ // Now edx can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ mov(edx, Operand(ebx));
+ __ bind(&skip_allocation);
+ // Use object in edx as a result holder
+ __ mov(eax, Operand(edx));
+ break;
+ }
+ case OVERWRITE_RIGHT:
+ // If the argument in eax is already an object, we skip the
+ // allocation of a heap number.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+ // Now eax can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ mov(eax, ebx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+}
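
The mode flip at the top of this helper can be stated as a pure function:

    // The same flip the code above performs: with reversed arguments,
    // "left" and "right" refer to the opposite slots, so the overwrite
    // hint flips too.
    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

    OverwriteMode EffectiveMode(OverwriteMode mode, bool args_reversed) {
      if (!args_reversed) return mode;
      if (mode == OVERWRITE_LEFT) return OVERWRITE_RIGHT;
      if (mode == OVERWRITE_RIGHT) return OVERWRITE_LEFT;
      return mode;  // NO_OVERWRITE is direction-free
    }

    int main() {
      return EffectiveMode(OVERWRITE_LEFT, true) == OVERWRITE_RIGHT ? 0 : 1;
    }
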
+
+
void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
// If arguments are not passed in registers read them from the stack.
- if (!HasArgumentsInRegisters()) {
+ if (!HasArgsInRegisters()) {
__ mov(eax, Operand(esp, 1 * kPointerSize));
__ mov(edx, Operand(esp, 2 * kPointerSize));
}
@@ -7497,7 +7766,7 @@ void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
// If arguments are not passed in registers remove them from the stack before
// returning.
- if (!HasArgumentsInRegisters()) {
+ if (!HasArgsInRegisters()) {
__ ret(2 * kPointerSize); // Remove both operands
} else {
__ ret(0);
@@ -7513,6 +7782,7 @@ void IntegerConvert(MacroAssembler* masm,
Register source,
bool use_sse3,
Label* conversion_failure) {
+ ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
Label done, right_exponent, normal_exponent;
Register scratch = ebx;
Register scratch2 = edi;
@@ -7715,7 +7985,7 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
}
-void FloatingPointHelper::LoadSse2Operands(MacroAssembler* masm,
+void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
// Load operand in edx into xmm0, or branch to not_numbers.
@@ -7747,16 +8017,40 @@ void FloatingPointHelper::LoadSse2Operands(MacroAssembler* masm,
}
+void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
+ Register scratch) {
+ const Register left = edx;
+ const Register right = eax;
+ __ mov(scratch, left);
+ ASSERT(!scratch.is(right)); // We're about to clobber scratch.
+ __ SmiUntag(scratch);
+ __ cvtsi2sd(xmm0, Operand(scratch));
+
+ __ mov(scratch, right);
+ __ SmiUntag(scratch);
+ __ cvtsi2sd(xmm1, Operand(scratch));
+}
+
+
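
Both smi loaders implement the same conversion, strip the tag and widen to double; a sketch assuming the 1-bit smi tag:

    #include <cstdint>

    // What SmiUntag followed by cvtsi2sd (or fild_s) computes.
    double SmiToDouble(int32_t tagged_smi) {
      int32_t untagged = tagged_smi >> 1;    // SmiUntag (arithmetic shift)
      return static_cast<double>(untagged);  // cvtsi2sd / fild_s
    }

    int main() { return SmiToDouble(6) == 3.0 ? 0 : 1; }
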
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register scratch) {
+ Register scratch,
+ ArgLocation arg_location) {
Label load_smi_1, load_smi_2, done_load_1, done;
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
+ if (arg_location == ARGS_IN_REGISTERS) {
+ __ mov(scratch, edx);
+ } else {
+ __ mov(scratch, Operand(esp, 2 * kPointerSize));
+ }
__ test(scratch, Immediate(kSmiTagMask));
__ j(zero, &load_smi_1, not_taken);
__ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
__ bind(&done_load_1);
- __ mov(scratch, Operand(esp, 1 * kPointerSize));
+ if (arg_location == ARGS_IN_REGISTERS) {
+ __ mov(scratch, eax);
+ } else {
+ __ mov(scratch, Operand(esp, 1 * kPointerSize));
+ }
__ test(scratch, Immediate(kSmiTagMask));
__ j(zero, &load_smi_2, not_taken);
__ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
@@ -7779,6 +8073,24 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
}
+void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
+ Register scratch) {
+ const Register left = edx;
+ const Register right = eax;
+ __ mov(scratch, left);
+ ASSERT(!scratch.is(right)); // We're about to clobber scratch.
+ __ SmiUntag(scratch);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+
+ __ mov(scratch, right);
+ __ SmiUntag(scratch);
+ __ mov(Operand(esp, 0), scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+}
+
+
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch) {
@@ -8099,8 +8411,12 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to runtime if regexp entry in generated code is turned
- // off.
+  // Just jump directly to runtime if native RegExp is not selected at compile
+  // time, or if regexp entry in generated code is turned off, either by the
+  // runtime switch or at compilation.
+#ifndef V8_NATIVE_REGEXP
+ __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+#else // V8_NATIVE_REGEXP
if (!FLAG_regexp_entry_native) {
__ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
return;
@@ -8113,10 +8429,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// esp[12]: subject string
// esp[16]: JSRegExp object
- Label runtime;
+ static const int kLastMatchInfoOffset = 1 * kPointerSize;
+ static const int kPreviousIndexOffset = 2 * kPointerSize;
+ static const int kSubjectOffset = 3 * kPointerSize;
+ static const int kJSRegExpOffset = 4 * kPointerSize;
+
+ Label runtime, invoke_regexp;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size();
+ __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ test(ebx, Operand(ebx));
+ __ j(zero, &runtime, not_taken);
// Check that the first argument is a JSRegExp object.
- __ mov(eax, Operand(esp, 4 * kPointerSize));
+ __ mov(eax, Operand(esp, kJSRegExpOffset));
ASSERT_EQ(0, kSmiTag);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
@@ -8124,12 +8454,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &runtime);
// Check that the RegExp has been compiled (data contains a fixed array).
__ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
-#ifdef DEBUG
- __ test(ecx, Immediate(kSmiTagMask));
- __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
-#endif
+ if (FLAG_debug_code) {
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
+ __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
+ __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+ }
// ecx: RegExp data (FixedArray)
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
@@ -8152,7 +8482,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: RegExp data (FixedArray)
// edx: Number of capture registers
// Check that the second argument is a string.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(eax, Operand(esp, kSubjectOffset));
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
@@ -8164,18 +8494,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: RegExp data (FixedArray)
// edx: Number of capture registers
-  // Check that the third argument is a positive smi.
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask | 0x80000000));
- __ j(not_zero, &runtime);
- // Check that it is not greater than the subject string length.
+ // Check that the third argument is a positive smi less than the subject
+  // string length. A negative value will be greater (unsigned comparison).
+ __ mov(eax, Operand(esp, kPreviousIndexOffset));
__ SmiUntag(eax);
__ cmp(eax, Operand(ebx));
- __ j(greater, &runtime);
+ __ j(above, &runtime);
// ecx: RegExp data (FixedArray)
// edx: Number of capture registers
// Check that the fourth object is a JSArray object.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
__ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
@@ -8193,76 +8522,118 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(greater, &runtime);
// ecx: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string (only support
- // flat ascii strings).
- __ mov(eax, Operand(esp, 3 * kPointerSize));
+ // Check the representation and encoding of the subject string.
+ Label seq_string, seq_two_byte_string, check_code;
+ const int kStringRepresentationEncodingMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ __ mov(eax, Operand(esp, kSubjectOffset));
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ and_(ebx, kStringRepresentationMask | kStringEncodingMask);
- __ cmp(ebx, kSeqStringTag | kAsciiStringTag);
+ __ and_(ebx, kStringRepresentationEncodingMask);
+ // First check for sequential string.
+ ASSERT_EQ(0, kStringTag);
+ ASSERT_EQ(0, kSeqStringTag);
+ __ test(Operand(ebx),
+ Immediate(kIsNotStringMask | kStringRepresentationMask));
+ __ j(zero, &seq_string);
+
+ // Check for flat cons string.
+ // A flat cons string is a cons string where the second part is the empty
+ // string. In that case the subject string is just the first part of the cons
+ // string. Also in this case the first part of the cons string is known to be
+ // a sequential string or an external string.
+ __ mov(edx, ebx);
+ __ and_(edx, kStringRepresentationMask);
+ __ cmp(edx, kConsStringTag);
__ j(not_equal, &runtime);
+ __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
+ __ cmp(Operand(edx), Factory::empty_string());
+ __ j(not_equal, &runtime);
+ __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ ASSERT_EQ(0, kSeqStringTag);
+ __ test(ebx, Immediate(kStringRepresentationMask));
+ __ j(not_zero, &runtime);
+ __ and_(ebx, kStringRepresentationEncodingMask);
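
The representation checks above are all bit tests against the instance type; a sketch with illustrative tag values (the masking pattern, not the exact constants, is the point):

    #include <cstdint>

    // Illustrative values only; the real ones are defined by V8's headers.
    const uint32_t kIsNotStringMask          = 0x80;
    const uint32_t kStringRepresentationMask = 0x03;  // seq / cons / external
    const uint32_t kStringEncodingMask       = 0x04;  // ascii vs two byte
    const uint32_t kSeqStringTag             = 0x00;
    const uint32_t kConsStringTag            = 0x01;

    // Sequential check: string bit and representation bits must all be zero.
    bool IsSequentialString(uint32_t instance_type) {
      return (instance_type &
              (kIsNotStringMask | kStringRepresentationMask)) == kSeqStringTag;
    }

    // Cons check: a string whose representation bits equal the cons tag.
    bool IsConsString(uint32_t instance_type) {
      return (instance_type & kIsNotStringMask) == 0 &&
             (instance_type & kStringRepresentationMask) == kConsStringTag;
    }

    int main() { return IsSequentialString(0x04) && IsConsString(0x01) ? 0 : 1; }
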
- // ecx: RegExp data (FixedArray)
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
- __ mov(eax, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ test(eax, Operand(eax));
- __ j(zero, &runtime, not_taken);
-
+ __ bind(&seq_string);
+  // eax: subject string (sequential, either ascii or two byte)
+  // ebx: subject string type & kStringRepresentationEncodingMask
// ecx: RegExp data (FixedArray)
// Check that the irregexp code has been generated for an ascii string. If
- // it has the field contains a code object otherwise it contains the hole.
+  // it has, the field contains a code object; otherwise it contains the hole.
+ __ cmp(ebx, kStringTag | kSeqStringTag | kTwoByteStringTag);
+ __ j(equal, &seq_two_byte_string);
+ if (FLAG_debug_code) {
+ __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
+ __ Check(equal, "Expected sequential ascii string");
+ }
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
+ __ Set(edi, Immediate(1)); // Type is ascii.
+ __ jmp(&check_code);
+
+ __ bind(&seq_two_byte_string);
+ // eax: subject string
+ // ecx: RegExp data (FixedArray)
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
+ __ Set(edi, Immediate(0)); // Type is two byte.
+
+ __ bind(&check_code);
+ // Check that the irregexp code has been generated for the actual string
+  // encoding. If it has, the field contains a code object; otherwise it
+  // contains the hole.
__ CmpObjectType(edx, CODE_TYPE, ebx);
__ j(not_equal, &runtime);
+ // eax: subject string
+ // edx: code
+  // edi: encoding of subject string (1 if ascii, 0 if two_byte)
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
- __ mov(eax, Operand(esp, 3 * kPointerSize)); // Subject string.
- __ mov(ebx, Operand(esp, 2 * kPointerSize)); // Previous index.
- __ mov(ecx, Operand(esp, 4 * kPointerSize)); // JSRegExp object.
- __ SmiUntag(ebx); // Previous index from sim.
+ __ mov(ebx, Operand(esp, kPreviousIndexOffset));
+ __ SmiUntag(ebx); // Previous index from smi.
// eax: subject string
// ebx: previous index
// edx: code
+  // edi: encoding of subject string (1 if ascii, 0 if two_byte)
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(&Counters::regexp_entry_native, 1);
- // Argument 8: Indicate that this is a direct call from JavaScript.
+ // Argument 7: Indicate that this is a direct call from JavaScript.
__ push(Immediate(1));
- // Argument 7: Start (high end) of backtracking stack memory area.
+ // Argument 6: Start (high end) of backtracking stack memory area.
__ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
__ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
__ push(ecx);
- // Argument 6: At start of string?
- __ xor_(Operand(ecx), ecx); // setcc only operated on cl (lower byte of ecx).
- __ test(ebx, Operand(ebx));
- __ setcc(zero, ecx); // 1 if 0 (start of string), 0 if positive.
- __ push(ecx);
-
// Argument 5: static offsets vector buffer.
__ push(Immediate(ExternalReference::address_of_static_offsets_vector()));
- // Argument 4: End of string data.
- __ mov(ecx, FieldOperand(eax, String::kLengthOffset));
- __ add(ecx, Operand(eax));
- __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ push(ecx);
-
- // Argument 3: Start of string data.
- __ mov(ecx, ebx);
- __ add(ebx, Operand(eax)); // String is ASCII.
- __ add(Operand(ebx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ push(ebx);
+ // Argument 4: End of string data
+ // Argument 3: Start of string data
+ Label push_two_byte, push_rest;
+ __ test(edi, Operand(edi));
+ __ mov(edi, FieldOperand(eax, String::kLengthOffset));
+ __ j(zero, &push_two_byte);
+ __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
+ __ push(ecx); // Argument 4.
+ __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
+ __ push(ecx); // Argument 3.
+ __ jmp(&push_rest);
+
+ __ bind(&push_two_byte);
+ __ lea(ecx, FieldOperand(eax, edi, times_2, SeqTwoByteString::kHeaderSize));
+ __ push(ecx); // Argument 4.
+ __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
+ __ push(ecx); // Argument 3.
+
+ __ bind(&push_rest);
// Argument 2: Previous index.
- __ push(ecx);
+ __ push(ebx);
// Argument 1: Subject string.
__ push(eax);
@@ -8271,7 +8642,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(Operand(edx));
// Remove arguments.
- __ add(Operand(esp), Immediate(8 * kPointerSize));
+ __ add(Operand(esp), Immediate(7 * kPointerSize));
// Check the result.
Label success;
@@ -8286,6 +8657,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Result must now be exception. If there is no pending exception already a
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
+  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
ExternalReference pending_exception(Top::k_pending_exception_address);
__ mov(eax,
Operand::StaticVariable(ExternalReference::the_hole_value_location()));
@@ -8298,15 +8670,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Load RegExp data.
__ bind(&success);
- __ mov(eax, Operand(esp, 4 * kPointerSize));
+ __ mov(eax, Operand(esp, kJSRegExpOffset));
__ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
__ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
__ add(Operand(edx), Immediate(2)); // edx was a smi.
// edx: Number of capture registers
// Load last_match_info which is still known to be a fast case JSArray.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
__ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
// ebx: last_match_info backing store (FixedArray)
@@ -8316,11 +8690,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
__ SmiUntag(edx); // Number of capture registers back from smi.
// Store last subject and last input.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(eax, Operand(esp, kSubjectOffset));
__ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
__ mov(ecx, ebx);
__ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
- __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(eax, Operand(esp, kSubjectOffset));
__ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
__ mov(ecx, ebx);
__ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
@@ -8334,14 +8708,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: offsets vector
// edx: number of capture registers
Label next_capture, done;
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // Read previous index.
+ __ mov(eax, Operand(esp, kPreviousIndexOffset));
// Capture register counter starts from number of capture registers and
  // counts down until wrapping after zero.
__ bind(&next_capture);
__ sub(Operand(edx), Immediate(1));
__ j(negative, &done);
// Read the value from the static offsets vector buffer.
- __ mov(edi, Operand(ecx, edx, times_pointer_size, 0));
+ __ mov(edi, Operand(ecx, edx, times_int_size, 0));
// Perform explicit shift
ASSERT_EQ(0, kSmiTag);
__ shl(edi, kSmiTagSize);
@@ -8361,12 +8735,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&done);
// Return last match info.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
__ ret(4 * kPointerSize);
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+#endif // V8_NATIVE_REGEXP
}
@@ -8519,7 +8894,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
CpuFeatures::Scope use_sse2(SSE2);
CpuFeatures::Scope use_cmov(CMOV);
- FloatingPointHelper::LoadSse2Operands(masm, &check_for_symbols);
+ FloatingPointHelper::LoadSSE2Operands(masm, &check_for_symbols);
__ comisd(xmm0, xmm1);
// Jump to builtin for NaN.
@@ -8581,30 +8956,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&check_for_strings);
- // Check that both objects are not smis.
- ASSERT_EQ(0, kSmiTag);
- __ mov(ebx, Operand(edx));
- __ and_(ebx, Operand(eax));
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &call_builtin);
-
- // Load instance type for both objects.
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-
- // Check that both are flat ascii strings.
- Label non_ascii_flat;
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiString =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- __ and_(ecx, kFlatAsciiString);
- __ cmp(ecx, kStringTag | kSeqStringTag | kAsciiStringTag);
- __ j(not_equal, &call_builtin);
- __ and_(ebx, kFlatAsciiString);
- __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
- __ j(not_equal, &call_builtin);
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &call_builtin);
// Inline comparison of ascii strings.
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
@@ -8681,6 +9033,33 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow;
+ // If the receiver might be a value (string, number or boolean) check for this
+ // and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // +1 ~ return address
+ Label receiver_is_value, receiver_is_js_object;
+ __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &receiver_is_value, not_taken);
+
+ // Check if the receiver is a valid JS object.
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi);
+ __ j(above_equal, &receiver_is_js_object);
+
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax);
+
+ __ bind(&receiver_is_js_object);
+ }
+
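
Semantically this is JavaScript's ToObject applied to the receiver; a C++ sketch of the fast/slow split (the value model here is hypothetical, not V8's):

    #include <string>
    #include <variant>

    struct JSObject {};  // a genuine object receiver needs no boxing
    using Value = std::variant<int, bool, std::string, JSObject>;

    // Mirror of the stub's split: objects fall through (receiver_is_js_object),
    // primitives are boxed first (what Builtins::TO_OBJECT does).
    JSObject ToObjectReceiver(const Value& receiver) {
      if (std::holds_alternative<JSObject>(receiver))
        return std::get<JSObject>(receiver);
      return JSObject{};  // wrapper object standing in for the boxed primitive
    }

    int main() {
      Value primitive = std::string("abc");
      ToObjectReceiver(primitive);  // boxed before the call proceeds
      return 0;
    }
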
// Get the function to call from the stack.
// +2 ~ receiver, return address
__ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
@@ -8706,13 +9085,6 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
}
-int CEntryStub::MinorKey() {
- ASSERT(result_size_ <= 2);
- // Result returned in eax, or eax+edx if result_size_ is 2.
- return 0;
-}
-
-
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// eax holds the exception.
@@ -8822,7 +9194,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- ExitFrame::Mode mode,
bool do_gc,
bool always_allocate_scope) {
// eax: result parameter for PerformGC, if any
@@ -8832,6 +9203,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// edi: number of arguments including receiver (C callee-saved)
// esi: pointer to the first argument (C callee-saved)
+ // Result returned in eax, or eax+edx if result_size_ is 2.
+
if (do_gc) {
__ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
__ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
@@ -8872,7 +9245,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(zero, &failure_returned, not_taken);
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(mode);
+ __ LeaveExitFrame(mode_);
__ ret(0);
// Handling of failure.
@@ -8959,7 +9332,7 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
}
-void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+void CEntryStub::Generate(MacroAssembler* masm) {
// eax: number of arguments including receiver
// ebx: pointer to C function (C callee-saved)
// ebp: frame pointer (restored after C call)
@@ -8971,12 +9344,8 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// of a proper result. The builtin entry handles this by performing
// a garbage collection and retrying the builtin (twice).
- ExitFrame::Mode mode = is_debug_break
- ? ExitFrame::MODE_DEBUG
- : ExitFrame::MODE_NORMAL;
-
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode);
+ __ EnterExitFrame(mode_);
// eax: result parameter for PerformGC, if any (setup below)
// ebx: pointer to builtin function (C callee-saved)
@@ -8994,7 +9363,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
false,
false);
@@ -9003,7 +9371,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
true,
false);
@@ -9014,7 +9381,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
true,
true);
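
The three GenerateCore calls above show the shape of this refactoring: the is_debug_break flag, and the mode argument derived from it and threaded through every call, are replaced by an ExitFrame::Mode stored on the stub at construction (see debug-ia32.cc below, which now builds CEntryStub(1, ExitFrame::MODE_DEBUG)). A simplified sketch of the before/after interface:

    // Before: the mode was recomputed from a flag and passed down each call.
    // void GenerateBody(MacroAssembler* masm, bool is_debug_break);
    // void GenerateCore(..., ExitFrame::Mode mode, bool do_gc, ...);

    // After (simplified; the real stub has more members):
    class CEntryStub : public CodeStub {
     public:
      CEntryStub(int result_size, ExitFrame::Mode mode)
          : result_size_(result_size), mode_(mode) {}
      void Generate(MacroAssembler* masm);  // reads mode_ directly
     private:
      int result_size_;
      ExitFrame::Mode mode_;
    };
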
@@ -9631,79 +9997,76 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3) {
- Label compare_lengths, compare_lengths_1;
-
- // Find minimum length. If either length is zero just compare lengths.
+ Label result_not_equal;
+ Label result_greater;
+ Label compare_lengths;
+ // Find minimum length.
+ Label left_shorter;
__ mov(scratch1, FieldOperand(left, String::kLengthOffset));
- __ test(scratch1, Operand(scratch1));
- __ j(zero, &compare_lengths_1);
- __ mov(scratch2, FieldOperand(right, String::kLengthOffset));
- __ test(scratch2, Operand(scratch2));
- __ j(zero, &compare_lengths_1);
- __ cmp(scratch1, Operand(scratch2));
- if (CpuFeatures::IsSupported(CMOV)) {
- CpuFeatures::Scope use_cmov(CMOV);
- __ cmov(greater, scratch1, Operand(scratch2));
- } else {
- Label l;
- __ j(less, &l);
- __ mov(scratch1, scratch2);
- __ bind(&l);
+ __ mov(scratch3, scratch1);
+ __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
+
+ Register length_delta = scratch3;
+
+ __ j(less_equal, &left_shorter);
+ // Right string is shorter. Change scratch1 to be the length of the right string.
+ __ sub(scratch1, Operand(length_delta));
+ __ bind(&left_shorter);
+
+ Register min_length = scratch1;
+
+ // If either length is zero, just compare lengths.
+ __ test(min_length, Operand(min_length));
+ __ j(zero, &compare_lengths);
+
+ // Change the index to run from -min_length to -1 by adding min_length
+ // to the string start. This means that the loop ends when the index
+ // reaches zero, which doesn't need an additional compare.
+ __ lea(left,
+ FieldOperand(left,
+ min_length, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ lea(right,
+ FieldOperand(right,
+ min_length, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ neg(min_length);
+
+ Register index = min_length; // index = -min_length;
+
+ {
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ // Compare characters.
+ __ mov_b(scratch2, Operand(left, index, times_1, 0));
+ __ cmpb(scratch2, Operand(right, index, times_1, 0));
+ __ j(not_equal, &result_not_equal);
+ __ add(Operand(index), Immediate(1));
+ __ j(not_zero, &loop);
}
- Label result_greater, result_less;
- Label loop;
- // Compare next character.
- __ mov(scratch3, Immediate(-1)); // Index into strings.
- __ bind(&loop);
- // Compare characters.
- Label character_compare_done;
- __ add(Operand(scratch3), Immediate(1));
- __ mov_b(scratch2, Operand(left,
- scratch3,
- times_1,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ subb(scratch2, Operand(right,
- scratch3,
- times_1,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ j(not_equal, &character_compare_done);
- __ sub(Operand(scratch1), Immediate(1));
- __ j(not_zero, &loop);
- // If min length characters match compare lengths otherwise last character
- // compare is the result.
- __ bind(&character_compare_done);
- __ j(equal, &compare_lengths);
- __ j(less, &result_less);
- __ jmp(&result_greater);
-
- // Compare lengths.
- Label result_not_equal;
+ // Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
- __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
- __ bind(&compare_lengths_1);
- __ sub(scratch1, FieldOperand(right, String::kLengthOffset));
+ __ test(length_delta, Operand(length_delta));
__ j(not_zero, &result_not_equal);
// Result is EQUAL.
ASSERT_EQ(0, EQUAL);
ASSERT_EQ(0, kSmiTag);
- __ xor_(eax, Operand(eax));
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(2 * kPointerSize);
+
__ bind(&result_not_equal);
__ j(greater, &result_greater);
// Result is LESS.
- __ bind(&result_less);
- __ mov(eax, Immediate(Smi::FromInt(LESS)->value()));
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ Set(eax, Immediate(Smi::FromInt(LESS)));
__ ret(2 * kPointerSize);
// Result is GREATER.
__ bind(&result_greater);
- __ mov(eax, Immediate(Smi::FromInt(GREATER)->value()));
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ Set(eax, Immediate(Smi::FromInt(GREATER)));
__ ret(2 * kPointerSize);
}
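
The rewritten loop is the classic negative-index trick: advance both string pointers past their first min_length characters, then run an index from -min_length up to 0, so the increment's zero flag terminates the loop and no separate bounds compare is needed. The same algorithm in portable C++:

    // Compare the first min_length bytes with a negative index, as the
    // assembly above does; the length difference breaks ties afterwards.
    int CompareFlatAscii(const char* left, int left_len,
                         const char* right, int right_len) {
      int min_length = left_len < right_len ? left_len : right_len;
      const char* left_end = left + min_length;
      const char* right_end = right + min_length;
      for (int i = -min_length; i != 0; i++) {  // ends exactly at i == 0
        if (left_end[i] != right_end[i]) {
          return left_end[i] < right_end[i] ? -1 : 1;  // LESS : GREATER
        }
      }
      if (left_len == right_len) return 0;   // EQUAL
      return left_len < right_len ? -1 : 1;  // shorter string is smaller
    }
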
@@ -9724,41 +10087,19 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &not_same);
ASSERT_EQ(0, EQUAL);
ASSERT_EQ(0, kSmiTag);
- __ xor_(eax, Operand(eax));
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize);
__ bind(&not_same);
- // Check that both objects are not smis.
- ASSERT_EQ(0, kSmiTag);
- __ mov(ebx, Operand(edx));
- __ and_(ebx, Operand(eax));
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
-
- // Load instance type for both strings.
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-
- // Check that both are flat ascii strings.
- Label non_ascii_flat;
- __ and_(ecx, kStringRepresentationMask | kStringEncodingMask);
- __ cmp(ecx, kSeqStringTag | kAsciiStringTag);
- __ j(not_equal, &non_ascii_flat);
- const int kFlatAsciiString =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- __ and_(ebx, kFlatAsciiString);
- __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
- __ j(not_equal, &non_ascii_flat);
+ // Check that both objects are sequential ascii strings.
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
// Compare flat ascii strings.
+ __ IncrementCounter(&Counters::string_compare_native, 1);
GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
- __ bind(&non_ascii_flat);
-
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
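
Taken together, Generate now has a three-tier structure: an identity fast path, the inline flat-ASCII loop, and the runtime as a catch-all. Schematically, with hypothetical helpers for the three tiers:

    bool BothSequentialAscii(const void* a, const void* b);  // the new check
    int FlatCompare(const void* a, const void* b);     // inline loop above
    int RuntimeCompare(const void* a, const void* b);  // slow path

    int StringCompare(const void* a, const void* b) {
      if (a == b) return 0;  // EQUAL: identical objects compare equal
      if (BothSequentialAscii(a, b)) return FlatCompare(a, b);
      return RuntimeCompare(a, b);  // returns -1 / 0 / 1 as a smi
    }
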
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 000222ff1..956f42433 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -32,6 +32,7 @@ namespace v8 {
namespace internal {
// Forward declarations
+class CompilationInfo;
class DeferredCode;
class RegisterAllocator;
class RegisterFile;
@@ -43,57 +44,70 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Reference support
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-
+// A reference is a C++ stack-allocated object that puts a
+// reference on the virtual frame. The reference may be consumed
+// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
+// When the lifetime (scope) of a valid reference ends, it must have
+// been consumed, and be in state UNLOADED.
class Reference BASE_EMBEDDED {
public:
// The values of the types is important, see size().
- enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen, Expression* expression);
+ enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+ Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get = false);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
- ASSERT(type_ == ILLEGAL);
+ ASSERT_EQ(ILLEGAL, type_);
type_ = value;
}
+ void set_unloaded() {
+ ASSERT_NE(ILLEGAL, type_);
+ ASSERT_NE(UNLOADED, type_);
+ type_ = UNLOADED;
+ }
// The size the reference takes up on the stack.
- int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+ int size() const {
+ return (type_ < SLOT) ? 0 : type_;
+ }
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+ bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
- // the expression stack, and it is left in place with its value above it.
+ // the expression stack, and it is consumed by the call unless the
+ // reference is for a compound assignment.
+ // If the reference is not consumed, it is left in place under its value.
void GetValue();
// Like GetValue except that the slot is expected to be written to before
- // being read from again. Thae value of the reference may be invalidated,
+ // being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
void TakeValue();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
- // on the expression stack. The stored value is left in place (with the
- // reference intact below it) to support chained assignments.
+ // on the expression stack. The value is stored in the location specified
+ // by the reference, and is left on top of the stack, after the reference
+ // is popped from beneath it (unloaded).
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
+ // Keep the reference on the stack after get, so it can be used by set later.
+ bool persist_after_get_;
};
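
With the new UNLOADED state the consumption protocol becomes checkable: a valid reference must be consumed by exactly one of GetValue, TakeValue, SetValue or UnloadReference before its scope ends. A schematic use following the comments above (assuming a code generator cgen, an expression expr, and the NOT_CONST_INIT init state in scope; not a verbatim excerpt of the code generator):

    {
      // Compound assignment: ask for the reference to survive the get.
      Reference ref(cgen, expr, true /* persist_after_get */);
      ref.GetValue();        // reference left in place under its value
      // ... combine with the right-hand side ...
      ref.SetValue(NOT_CONST_INIT);  // consumes it: state becomes UNLOADED
    }  // scope ends: the reference must now be UNLOADED
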
@@ -266,7 +280,7 @@ class CodeGenState BASE_EMBEDDED {
// -------------------------------------------------------------------------
-// Arguments allocation mode
+// Arguments allocation mode.
enum ArgumentsAllocationMode {
NO_ARGUMENTS_ALLOCATION,
@@ -280,11 +294,21 @@ enum ArgumentsAllocationMode {
class CodeGenerator: public AstVisitor {
public:
+ // Compilation mode. Either the compiler is used as the primary
+ // compiler and needs to set up everything, or the compiler is used
+ // as the secondary compiler for split compilation and has to handle
+ // bailouts.
+ enum Mode {
+ PRIMARY,
+ SECONDARY
+ };
+
// Takes a function literal, generates code for it. This function should only
// be called by compiler.cc.
static Handle<Code> MakeCode(FunctionLiteral* fun,
Handle<Script> script,
- bool is_eval);
+ bool is_eval,
+ CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(FunctionLiteral* fun);
@@ -328,8 +352,7 @@ class CodeGenerator: public AstVisitor {
private:
// Construction/Destruction
- CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
- virtual ~CodeGenerator() { delete masm_; }
+ CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
// Accessors
Scope* scope() const { return scope_; }
@@ -365,7 +388,7 @@ class CodeGenerator: public AstVisitor {
void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
- void GenCode(FunctionLiteral* fun);
+ void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
// Generate the return sequence code. Should be called no more than
// once per compiled function, immediately after binding the return
@@ -420,6 +443,11 @@ class CodeGenerator: public AstVisitor {
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
+ // Load a property of an object, returning it in a Result.
+ // The object and the property name are passed on the stack and
+ // are not changed.
+ Result EmitKeyedLoad(bool is_global);
+
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for
@@ -444,20 +472,20 @@ class CodeGenerator: public AstVisitor {
// Emit code to perform a binary operation on a constant
// smi and a likely smi. Consumes the Result *operand.
- void ConstantSmiBinaryOperation(Token::Value op,
- Result* operand,
- Handle<Object> constant_operand,
- StaticType* type,
- bool reversed,
- OverwriteMode overwrite_mode);
+ Result ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> constant_operand,
+ StaticType* type,
+ bool reversed,
+ OverwriteMode overwrite_mode);
// Emit code to perform a binary operation on two likely smis.
// The code to handle smi arguments is produced inline.
// Consumes the Results *left and *right.
- void LikelySmiBinaryOperation(Token::Value op,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode);
+ Result LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode);
void Comparison(AstNode* node,
Condition cc,
@@ -475,12 +503,14 @@ class CodeGenerator: public AstVisitor {
void StoreUnsafeSmiToLocal(int offset, Handle<Object> value);
void PushUnsafeSmi(Handle<Object> value);
- void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+ void CallWithArguments(ZoneList<Expression*>* arguments,
+ CallFunctionFlags flags,
+ int position);
- // Use an optimized version of Function.prototype.apply that avoid
- // allocating the arguments object and just copies the arguments
- // from the stack.
- void CallApplyLazy(Property* apply,
+ // An optimized implementation of expressions of the form
+ // x.apply(y, arguments). We call x the applicand and y the receiver.
+ // The optimization avoids allocating an arguments object if possible.
+ void CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position);
@@ -515,6 +545,7 @@ class CodeGenerator: public AstVisitor {
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
+ void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -612,7 +643,8 @@ class CodeGenerator: public AstVisitor {
friend class Reference;
friend class Result;
friend class FastCodeGenerator;
- friend class CodeGenSelector;
+ friend class FullCodeGenerator;
+ friend class FullCodeGenSyntaxChecker;
friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
@@ -620,39 +652,6 @@ class CodeGenerator: public AstVisitor {
};
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop)
- : argc_(argc), in_loop_(in_loop) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
-
-#ifdef DEBUG
- void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
-#endif
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() { return argc_; }
- InLoopFlag InLoop() { return in_loop_; }
-};
-
-
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags {
NO_GENERIC_BINARY_FLAGS = 0,
@@ -682,6 +681,11 @@ class GenericBinaryOpStub: public CodeStub {
void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
+ Result GenerateCall(MacroAssembler* masm,
+ VirtualFrame* frame,
+ Result* left,
+ Result* right);
+
private:
Token::Value op_;
OverwriteMode mode_;
@@ -728,11 +732,11 @@ class GenericBinaryOpStub: public CodeStub {
void GenerateSmiCode(MacroAssembler* masm, Label* slow);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
+ void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
bool ArgsInRegistersSupported() {
- return ((op_ == Token::ADD) || (op_ == Token::SUB)
- || (op_ == Token::MUL) || (op_ == Token::DIV))
- && flags_ != NO_SMI_CODE_IN_STUB;
+ return op_ == Token::ADD || op_ == Token::SUB
+ || op_ == Token::MUL || op_ == Token::DIV;
}
bool IsOperationCommutative() {
return (op_ == Token::ADD) || (op_ == Token::MUL);
@@ -741,8 +745,8 @@ class GenericBinaryOpStub: public CodeStub {
void SetArgsInRegisters() { args_in_registers_ = true; }
void SetArgsReversed() { args_reversed_ = true; }
bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
- bool HasArgumentsInRegisters() { return args_in_registers_; }
- bool HasArgumentsReversed() { return args_reversed_; }
+ bool HasArgsInRegisters() { return args_in_registers_; }
+ bool HasArgsReversed() { return args_reversed_; }
};
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index 5ebe1e070..1f34b3026 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -94,7 +94,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ Set(eax, Immediate(0)); // no arguments
__ mov(ebx, Immediate(ExternalReference::debug_break()));
- CEntryDebugBreakStub ceb;
+ CEntryStub ceb(1, ExitFrame::MODE_DEBUG);
__ CallStub(&ceb);
// Restore the register values containing object pointers from the expression
@@ -132,12 +132,13 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // REgister state for IC store call (from ic-ia32.cc).
+ // Register state for IC store call (from ic-ia32.cc).
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
+ // -- edx : receiver
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), false);
+ Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit() | edx.bit(), false);
}
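
The second argument to Generate_DebugBreakCallHelper is a register mask, so preserving the new edx receiver is a one-bit change: bit() is effectively 1 << code, and ia32 register codes follow the hardware encoding (eax = 0, ecx = 1, edx = 2).

    int RegisterBit(int code) { return 1 << code; }
    // eax.bit() | ecx.bit() | edx.bit()  ==  1 | 2 | 4  ==  7
    const int kSavedRegs = (1 << 0) | (1 << 1) | (1 << 2);
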
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 581cdc075..cb500d564 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -53,22 +53,25 @@ struct ByteMnemonic {
static ByteMnemonic two_operands_instr[] = {
{0x03, "add", REG_OPER_OP_ORDER},
- {0x21, "and", OPER_REG_OP_ORDER},
- {0x23, "and", REG_OPER_OP_ORDER},
- {0x3B, "cmp", REG_OPER_OP_ORDER},
- {0x8D, "lea", REG_OPER_OP_ORDER},
{0x09, "or", OPER_REG_OP_ORDER},
{0x0B, "or", REG_OPER_OP_ORDER},
{0x1B, "sbb", REG_OPER_OP_ORDER},
+ {0x21, "and", OPER_REG_OP_ORDER},
+ {0x23, "and", REG_OPER_OP_ORDER},
{0x29, "sub", OPER_REG_OP_ORDER},
{0x2A, "subb", REG_OPER_OP_ORDER},
{0x2B, "sub", REG_OPER_OP_ORDER},
- {0x85, "test", REG_OPER_OP_ORDER},
{0x31, "xor", OPER_REG_OP_ORDER},
{0x33, "xor", REG_OPER_OP_ORDER},
+ {0x38, "cmpb", OPER_REG_OP_ORDER},
+ {0x3A, "cmpb", REG_OPER_OP_ORDER},
+ {0x3B, "cmp", REG_OPER_OP_ORDER},
+ {0x84, "test_b", REG_OPER_OP_ORDER},
+ {0x85, "test", REG_OPER_OP_ORDER},
{0x87, "xchg", REG_OPER_OP_ORDER},
{0x8A, "mov_b", REG_OPER_OP_ORDER},
{0x8B, "mov", REG_OPER_OP_ORDER},
+ {0x8D, "lea", REG_OPER_OP_ORDER},
{-1, "", UNSET_OP_ORDER}
};
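
The entries are now sorted by opcode, and the {-1, "", UNSET_OP_ORDER} sentinel still terminates the list. One plausible way such a table is consumed (sketch only; field names here are illustrative and the decoder's actual lookup structure is not shown in this diff):

    struct Entry { int opcode; const char* mnem; };

    const Entry* FindEntry(int opcode, const Entry* table) {
      for (const Entry* e = table; e->opcode != -1; e++) {
        if (e->opcode == opcode) return e;  // first match wins
      }
      return 0;  // not a two-operand instruction
    }
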
diff --git a/deps/v8/src/ia32/fast-codegen-ia32.cc b/deps/v8/src/ia32/fast-codegen-ia32.cc
index fdab58579..2a15733ae 100644
--- a/deps/v8/src/ia32/fast-codegen-ia32.cc
+++ b/deps/v8/src/ia32/fast-codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,1684 +28,114 @@
#include "v8.h"
#include "codegen-inl.h"
-#include "compiler.h"
#include "fast-codegen.h"
-#include "parser.h"
-#include "debug.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm())
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right, with the
-// return address on top of them. The actual argument count matches the
-// formal parameter count expected by the function.
-//
-// The live registers are:
-// o edi: the JS function object being called (ie, ourselves)
-// o esi: our context
-// o ebp: our caller's frame pointer
-// o esp: stack pointer (pointing to return address)
-//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-ia32.h for its layout.
-void FastCodeGenerator::Generate(FunctionLiteral* fun) {
- function_ = fun;
- SetFunctionPosition(fun);
-
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS Function.
-
- { Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = fun->scope()->num_stack_slots();
- if (locals_count == 1) {
- __ push(Immediate(Factory::undefined_value()));
- } else if (locals_count > 1) {
- __ mov(eax, Immediate(Factory::undefined_value()));
- for (int i = 0; i < locals_count; i++) {
- __ push(eax);
- }
- }
- }
-
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- if (fun->scope()->num_heap_slots() > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is still in edi.
- __ push(edi);
- __ CallRuntime(Runtime::kNewContext, 1);
- function_in_register = false;
- // Context is returned in both eax and esi. It replaces the context
- // passed to us. It's saved in the stack and kept live in esi.
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
-
- // Copy parameters into context if necessary.
- int num_parameters = fun->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = fun->scope()->parameter(i)->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ mov(eax, Operand(ebp, parameter_offset));
- // Store it in the context
- __ mov(Operand(esi, Context::SlotOffset(slot->index())), eax);
- }
- }
- }
-
- Variable* arguments = fun->scope()->arguments()->AsVariable();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (function_in_register) {
- __ push(edi);
- } else {
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- }
- // Receiver is just before the parameters on the caller's stack.
- __ lea(edx, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
- fun->num_parameters() * kPointerSize));
- __ push(edx);
- __ push(Immediate(Smi::FromInt(fun->num_parameters())));
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ CallStub(&stub);
- __ mov(ecx, eax); // Duplicate result.
- Move(arguments->slot(), eax, ebx, edx);
- Slot* dot_arguments_slot =
- fun->scope()->arguments_shadow()->AsVariable()->slot();
- Move(dot_arguments_slot, ecx, ebx, edx);
- }
-
-
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(fun->scope()->declarations());
- }
-
- { Comment cmnt(masm_, "[ Stack check");
- Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit();
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, taken);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(fun->body());
- ASSERT(loop_depth() == 0);
- }
-
- { Comment cmnt(masm_, "[ return <undefined>;");
- // Emit a 'return undefined' in case control fell off the end of the body.
- __ mov(eax, Factory::undefined_value());
- EmitReturnSequence(function_->end_position());
- }
-}
-
-
-void FastCodeGenerator::EmitReturnSequence(int position) {
- Comment cmnt(masm_, "[ Return sequence");
- if (return_label_.is_bound()) {
- __ jmp(&return_label_);
- } else {
- // Common return label
- __ bind(&return_label_);
- if (FLAG_trace) {
- __ push(eax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
- CodeGenerator::RecordPositions(masm_, position);
- __ RecordJSReturn();
- // Do not use the leave instruction here because it is too short to
- // patch with the code required by the debugger.
- __ mov(esp, ebp);
- __ pop(ebp);
- __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(Assembler::kJSReturnSequenceLength,
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
- }
-}
-
-
-void FastCodeGenerator::Apply(Expression::Context context,
- Slot* slot,
- Register scratch) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- break;
- case Expression::kValue: {
- MemOperand location = EmitSlotSearch(slot, scratch);
- __ push(location);
- break;
- }
- case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
- Move(scratch, slot);
- Apply(context, scratch);
- break;
- }
-}
-
-
-void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- break;
- case Expression::kValue:
- __ push(Immediate(lit->handle()));
- break;
- case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
- __ mov(eax, lit->handle());
- Apply(context, eax);
- break;
- }
-}
-
-
-void FastCodeGenerator::ApplyTOS(Expression::Context context) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- __ Drop(1);
- break;
- case Expression::kValue:
- break;
- case Expression::kTest:
- __ pop(eax);
- TestAndBranch(eax, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ mov(eax, Operand(esp, 0));
- TestAndBranch(eax, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ mov(eax, Operand(esp, 0));
- TestAndBranch(eax, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- }
- }
-}
-
-
-void FastCodeGenerator::DropAndApply(int count,
- Expression::Context context,
- Register reg) {
- ASSERT(count > 0);
- ASSERT(!reg.is(esp));
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- __ Drop(count);
- break;
- case Expression::kValue:
- if (count > 1) __ Drop(count - 1);
- __ mov(Operand(esp, 0), reg);
- break;
- case Expression::kTest:
- __ Drop(count);
- TestAndBranch(reg, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (count > 1) __ Drop(count - 1);
- __ mov(Operand(esp, 0), reg);
- TestAndBranch(reg, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (count > 1) __ Drop(count - 1);
- __ mov(Operand(esp, 0), reg);
- TestAndBranch(reg, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
- }
-}
-
-
-MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- return Operand(ebp, SlotOffset(slot));
- case Slot::CONTEXT: {
- int context_chain_length =
- function_->scope()->ContextChainLength(slot->var()->scope());
- __ LoadContext(scratch, context_chain_length);
- return CodeGenerator::ContextOperand(scratch, slot->index());
- }
- case Slot::LOOKUP:
- UNREACHABLE();
- }
- UNREACHABLE();
- return Operand(eax, 0);
-}
-
-
-void FastCodeGenerator::Move(Register destination, Slot* source) {
- MemOperand location = EmitSlotSearch(source, destination);
- __ mov(destination, location);
+void FastCodeGenerator::EmitLoadReceiver(Register reg) {
+ // Offset 2 is due to the return address and the saved frame pointer.
+ int index = 2 + function()->scope()->num_parameters();
+ __ mov(reg, Operand(ebp, index * kPointerSize));
}
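
The 2 + num_parameters arithmetic comes straight from the frame layout: the receiver was pushed before the parameters, so it sits above the saved frame pointer, the return address, and all parameters. Worked out for a function with 3 parameters on ia32 (kPointerSize == 4):

    //   ebp + 20 : receiver        <- (2 + 3) * 4
    //   ebp + 16 : parameter 0     (pushed first)
    //   ebp + 12 : parameter 1
    //   ebp +  8 : parameter 2     (pushed last)
    //   ebp +  4 : return address
    //   ebp +  0 : saved ebp
    int ReceiverOffset(int num_parameters) {
      return (2 + num_parameters) * 4;  // bytes above ebp
    }
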
-void FastCodeGenerator::Move(Slot* dst,
- Register src,
- Register scratch1,
- Register scratch2) {
- ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
- ASSERT(!scratch1.is(src) && !scratch2.is(src));
- MemOperand location = EmitSlotSearch(dst, scratch1);
- __ mov(location, src);
- // Emit the write barrier code if the location is in the heap.
- if (dst->type() == Slot::CONTEXT) {
- int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
- __ RecordWrite(scratch1, offset, src, scratch2);
+void FastCodeGenerator::EmitReceiverMapCheck() {
+ Comment cmnt(masm(), ";; MapCheck(this)");
+ if (FLAG_print_ir) {
+ PrintF("MapCheck(this)\n");
}
-}
-
-void FastCodeGenerator::TestAndBranch(Register source,
- Label* true_label,
- Label* false_label) {
- ASSERT_NE(NULL, true_label);
- ASSERT_NE(NULL, false_label);
- // Use the shared ToBoolean stub to compile the value in the register into
- // control flow to the code generator's true and false labels. Perform
- // the fast checks assumed by the stub.
- __ cmp(source, Factory::undefined_value()); // The undefined value is false.
- __ j(equal, false_label);
- __ cmp(source, Factory::true_value()); // True is true.
- __ j(equal, true_label);
- __ cmp(source, Factory::false_value()); // False is false.
- __ j(equal, false_label);
- ASSERT_EQ(0, kSmiTag);
- __ test(source, Operand(source)); // The smi zero is false.
- __ j(zero, false_label);
- __ test(source, Immediate(kSmiTagMask)); // All other smis are true.
- __ j(zero, true_label);
+ EmitLoadReceiver(edx);
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, bailout());
- // Call the stub for all other cases.
- __ push(source);
- ToBooleanStub stub;
- __ CallStub(&stub);
- __ test(eax, Operand(eax)); // The stub returns nonzero for true.
- __ j(not_zero, true_label);
- __ jmp(false_label);
+ ASSERT(has_receiver() && receiver()->IsHeapObject());
+ Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
+ Handle<Map> map(object->map());
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Immediate(map));
+ __ j(not_equal, bailout());
}
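
The fast code generator specializes the whole function for one concrete receiver map and exits through bailout() on any mismatch; the guard is the usual smi-test-then-map-compare pair. In C++ terms, with hypothetical helpers for the two machine checks:

    bool IsSmi(long value);                 // tests kSmiTagMask
    const void* MapOf(const void* object);  // reads HeapObject::kMapOffset

    bool ReceiverMatches(long receiver, const void* expected_map) {
      if (IsSmi(receiver)) return false;  // smis have no map: bail out
      const void* obj = reinterpret_cast<const void*>(receiver);
      return MapOf(obj) == expected_map;  // mismatch also bails out
    }
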
-void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
- Comment cmnt(masm_, "[ Declaration");
- Variable* var = decl->proxy()->var();
- ASSERT(var != NULL); // Must have been resolved.
- Slot* slot = var->slot();
- Property* prop = var->AsProperty();
-
- if (slot != NULL) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- if (decl->mode() == Variable::CONST) {
- __ mov(Operand(ebp, SlotOffset(slot)),
- Immediate(Factory::the_hole_value()));
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(Operand(ebp, SlotOffset(slot)));
- }
- break;
-
- case Slot::CONTEXT:
- // We bypass the general EmitSlotSearch because we know more about
- // this specific context.
-
- // The variable in the decl always resides in the current context.
- ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
- if (FLAG_debug_code) {
- // Check if we have the correct context pointer.
- __ mov(ebx,
- CodeGenerator::ContextOperand(esi, Context::FCONTEXT_INDEX));
- __ cmp(ebx, Operand(esi));
- __ Check(equal, "Unexpected declaration in current context.");
- }
- if (decl->mode() == Variable::CONST) {
- __ mov(eax, Immediate(Factory::the_hole_value()));
- __ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
- // No write barrier since the hole value is in old space.
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(eax);
- __ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
- int offset = Context::SlotOffset(slot->index());
- __ RecordWrite(esi, offset, eax, ecx);
- }
- break;
-
- case Slot::LOOKUP: {
- __ push(esi);
- __ push(Immediate(var->name()));
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(decl->mode() == Variable::VAR ||
- decl->mode() == Variable::CONST);
- PropertyAttributes attr =
- (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
- __ push(Immediate(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (decl->mode() == Variable::CONST) {
- __ push(Immediate(Factory::the_hole_value()));
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- } else {
- __ push(Immediate(Smi::FromInt(0))); // No initial value!
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-
- } else if (prop != NULL) {
- if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
- // We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value.
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->key()->context());
- Visit(prop->key());
-
- if (decl->fun() != NULL) {
- ASSERT_EQ(Expression::kValue, decl->fun()->context());
- Visit(decl->fun());
- __ pop(eax);
- } else {
- __ Set(eax, Immediate(Factory::the_hole_value()));
- }
-
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // Absence of a test eax instruction following the call
- // indicates that none of the load was inlined.
-
- // Value in eax is ignored (declarations are statements). Receiver
- // and key on stack are discarded.
- __ Drop(2);
- }
- }
-}
-
-
-void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals.
- __ push(esi); // The context is the first argument.
- __ push(Immediate(pairs));
- __ push(Immediate(Smi::FromInt(is_eval_ ? 1 : 0)));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
- // Return value is ignored.
-}
-
-
-void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- Comment cmnt(masm_, "[ FunctionLiteral");
-
- // Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate =
- Compiler::BuildBoilerplate(expr, script_, this);
- if (HasStackOverflow()) return;
-
- ASSERT(boilerplate->IsBoilerplate());
-
- // Create a new closure.
- __ push(esi);
- __ push(Immediate(boilerplate));
- __ CallRuntime(Runtime::kNewClosure, 2);
- Apply(expr->context(), eax);
-}
-
-
-void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var(), expr->context());
-}
-
-
-void FastCodeGenerator::EmitVariableLoad(Variable* var,
- Expression::Context context) {
- Expression* rewrite = var->rewrite();
- if (rewrite == NULL) {
- ASSERT(var->is_global());
- Comment cmnt(masm_, "Global variable");
- // Use inline caching. Variable name is passed in ecx and the global
- // object on the stack.
- __ push(CodeGenerator::GlobalObject());
- __ mov(ecx, var->name());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- // By emitting a nop we make sure that we do not have a test eax
- // instruction after the call it is treated specially by the LoadIC code
- // Remember that the assembler may choose to do peephole optimization
- // (eg, push/pop elimination).
- __ nop();
- DropAndApply(1, context, eax);
- } else if (rewrite->AsSlot() != NULL) {
- Slot* slot = rewrite->AsSlot();
- if (FLAG_debug_code) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL: {
- Comment cmnt(masm_, "Stack slot");
- break;
- }
- case Slot::CONTEXT: {
- Comment cmnt(masm_, "Context slot");
- break;
- }
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- break;
- }
- }
- Apply(context, slot, eax);
- } else {
- Comment cmnt(masm_, "Variable rewritten to property");
- // A variable has been rewritten into an explicit access to an object
- // property.
- Property* property = rewrite->AsProperty();
- ASSERT_NOT_NULL(property);
-
- // The only property expressions that can occur are of the form
- // "slot[literal]".
-
- // Assert that the object is in a slot.
- Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->slot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- MemOperand object_loc = EmitSlotSearch(object_slot, eax);
- __ push(object_loc);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ push(Immediate(key_literal->handle()));
-
- // Do a keyed property load.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // Notice: We must not have a "test eax, ..." instruction after the
- // call. It is treated specially by the LoadIC code.
- __ nop();
- // Drop key and object left on the stack by IC.
- DropAndApply(2, context, eax);
- }
-}
-
-
-void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- Label done;
- // Registers will be used as follows:
- // edi = JS function.
- // ebx = literals array.
- // eax = regexp literal.
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ebx, FieldOperand(edi, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ mov(eax, FieldOperand(ebx, literal_offset));
- __ cmp(eax, Factory::undefined_value());
- __ j(not_equal, &done);
- // Create regexp literal using runtime function
- // Result will be in eax.
- __ push(ebx);
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(expr->pattern()));
- __ push(Immediate(expr->flags()));
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- // Label done:
- __ bind(&done);
- Apply(expr->context(), eax);
-}
-
-
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- Comment cmnt(masm_, "[ ObjectLiteral");
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(expr->constant_properties()));
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
- } else {
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
- }
-
- // If result_saved == true: The result is saved on top of the
- // stack and in eax.
- // If result_saved == false: The result not on the stack, just in eax.
- bool result_saved = false;
-
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key();
- Expression* value = property->value();
- if (!result_saved) {
- __ push(eax); // Save result on the stack
- result_saved = true;
- }
- switch (property->kind()) {
- case ObjectLiteral::Property::MATERIALIZED_LITERAL: // Fall through.
- ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
- __ pop(eax);
- __ mov(ecx, Immediate(key->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // StoreIC leaves the receiver on the stack.
- __ mov(eax, Operand(esp, 0)); // Restore result into eax.
- break;
- }
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
- __ push(eax);
- Visit(key);
- ASSERT_EQ(Expression::kValue, key->context());
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
- __ CallRuntime(Runtime::kSetProperty, 3);
- __ mov(eax, Operand(esp, 0)); // Restore result into eax.
- break;
- case ObjectLiteral::Property::SETTER:
- case ObjectLiteral::Property::GETTER:
- __ push(eax);
- Visit(key);
- ASSERT_EQ(Expression::kValue, key->context());
- __ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
- Smi::FromInt(1) :
- Smi::FromInt(0)));
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
- __ CallRuntime(Runtime::kDefineAccessor, 4);
- __ mov(eax, Operand(esp, 0)); // Restore result into eax.
- break;
- default: UNREACHABLE();
- }
- }
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- if (result_saved) __ Drop(1);
- break;
- case Expression::kValue:
- if (!result_saved) __ push(eax);
- break;
- case Expression::kTest:
- if (result_saved) __ pop(eax);
- TestAndBranch(eax, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (!result_saved) __ push(eax);
- TestAndBranch(eax, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (!result_saved) __ push(eax);
- TestAndBranch(eax, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
- }
-}
-
-
-void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(expr->constant_elements()));
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- }
-
- bool result_saved = false; // Is the result saved to the stack?
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- ZoneList<Expression*>* subexprs = expr->values();
- for (int i = 0, len = subexprs->length(); i < len; i++) {
- Expression* subexpr = subexprs->at(i);
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
-
- if (!result_saved) {
- __ push(eax);
- result_saved = true;
- }
- Visit(subexpr);
- ASSERT_EQ(Expression::kValue, subexpr->context());
-
- // Store the subexpression value in the array's elements.
- __ pop(eax); // Subexpression value.
- __ mov(ebx, Operand(esp, 0)); // Copy of array literal.
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ mov(FieldOperand(ebx, offset), eax);
-
- // Update the write barrier for the array store.
- __ RecordWrite(ebx, offset, eax, ecx);
- }
-
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- if (result_saved) __ Drop(1);
- break;
- case Expression::kValue:
- if (!result_saved) __ push(eax);
- break;
- case Expression::kTest:
- if (result_saved) __ pop(eax);
- TestAndBranch(eax, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (!result_saved) __ push(eax);
- TestAndBranch(eax, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (!result_saved) __ push(eax);
- TestAndBranch(eax, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
- }
-}
-
-
-void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
- Expression::Context context) {
- SetSourcePosition(prop->position());
- Literal* key = prop->key()->AsLiteral();
- __ mov(ecx, Immediate(key->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- Apply(context, eax);
-}
-
-
-void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop,
- Expression::Context context) {
- SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- Apply(context, eax);
-}
-
-
-void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
- Expression::Context context) {
- GenericBinaryOpStub stub(op,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS);
- __ CallStub(&stub);
- Apply(context, eax);
-}
-
-
-void FastCodeGenerator::EmitVariableAssignment(Variable* var,
- Expression::Context context) {
- ASSERT(var != NULL);
- ASSERT(var->is_global() || var->slot() != NULL);
- if (var->is_global()) {
- // Assignment to a global variable. Use inline caching for the
- // assignment. Right-hand-side value is passed in eax, variable name in
- // ecx, and the global object on the stack.
- __ pop(eax);
- __ mov(ecx, var->name());
- __ push(CodeGenerator::GlobalObject());
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // Overwrite the receiver on the stack with the result if needed.
- DropAndApply(1, context, eax);
-
- } else if (var->slot() != NULL) {
- Slot* slot = var->slot();
- switch (slot->type()) {
- case Slot::LOCAL:
- case Slot::PARAMETER: {
- Operand target = Operand(ebp, SlotOffset(slot));
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Perform assignment and discard value.
- __ pop(target);
- break;
- case Expression::kValue:
- // Perform assignment and preserve value.
- __ mov(eax, Operand(esp, 0));
- __ mov(target, eax);
- break;
- case Expression::kTest:
- // Perform assignment and test (and discard) value.
- __ pop(eax);
- __ mov(target, eax);
- TestAndBranch(eax, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ mov(eax, Operand(esp, 0));
- __ mov(target, eax);
- TestAndBranch(eax, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ mov(eax, Operand(esp, 0));
- __ mov(target, eax);
- TestAndBranch(eax, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
- }
- break;
- }
-
- case Slot::CONTEXT: {
- MemOperand target = EmitSlotSearch(slot, ecx);
- __ pop(eax);
- __ mov(target, eax);
-
- // RecordWrite may destroy all its register arguments.
- if (context == Expression::kValue) {
- __ push(eax);
- } else if (context != Expression::kEffect) {
- __ mov(edx, eax);
- }
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ RecordWrite(ecx, offset, eax, ebx);
- if (context != Expression::kEffect && context != Expression::kValue) {
- Apply(context, edx);
- }
- break;
- }
-
- case Slot::LOOKUP:
- UNREACHABLE();
- break;
- }
- } else {
- // Variables rewritten as properties are not treated as variables in
- // assignments.
- UNREACHABLE();
- }
-}
-
-
-void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a named store IC.
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
-
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(Operand(esp, kPointerSize)); // Receiver is under value.
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- __ pop(eax);
- __ mov(ecx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(eax); // Result of assignment, saved even if not needed.
- __ push(Operand(esp, kPointerSize)); // Receiver is under value.
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(eax);
- }
-
- DropAndApply(1, expr->context(), eax);
-}
-
-
-void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a keyed store IC.
-
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- // Receiver is under the key and value.
- __ push(Operand(esp, 2 * kPointerSize));
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- __ pop(eax);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // This nop signals to the IC that there is no inlined code at the call
- // site for it to patch.
- __ nop();
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(eax); // Result of assignment, saved even if not needed.
- // Receiver is under the key and value.
- __ push(Operand(esp, 2 * kPointerSize));
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(eax);
- }
-
- // Receiver and key are still on stack.
- DropAndApply(2, expr->context(), eax);
-}
-
-
-void FastCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- Expression* key = expr->key();
-
- // Record the source position for the property load.
- SetSourcePosition(expr->position());
-
- // Evaluate the receiver.
- Visit(expr->obj());
-
- if (key->IsPropertyName()) {
- // Do a named property load. The IC expects the property name in ecx
- // and the receiver on the stack.
- __ mov(ecx, Immediate(key->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // By emitting a nop we make sure that we do not have a test eax
- // instruction after the call it is treated specially by the LoadIC code.
- __ nop();
- DropAndApply(1, expr->context(), eax);
- } else {
- // Do a keyed property load.
- Visit(expr->key());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // By emitting a nop we make sure that we do not have a "test eax,..."
- // instruction after the call it is treated specially by the LoadIC code.
- __ nop();
- // Drop key left on the stack by IC.
- DropAndApply(2, expr->context(), eax);
- }
-}
-
-
-void FastCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
- }
- __ Set(ecx, Immediate(name));
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
- __ call(ic, mode);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- Apply(expr->context(), eax);
-}
-
-
-void FastCodeGenerator::EmitCallWithStub(Call* expr) {
- // Code common for calls using the call stub.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, NOT_IN_LOOP);
- __ CallStub(&stub);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- DropAndApply(1, expr->context(), eax);
-}
-
-
-void FastCodeGenerator::VisitCall(Call* expr) {
- Comment cmnt(masm_, "[ Call");
- Expression* fun = expr->expression();
- Variable* var = fun->AsVariableProxy()->AsVariable();
-
- if (var != NULL && var->is_possibly_eval()) {
- // Call to the identifier 'eval'.
- UNREACHABLE();
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // Push global object as receiver for the call IC.
- __ push(CodeGenerator::GlobalObject());
- EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
- // Call to a lookup slot.
- UNREACHABLE();
- } else if (fun->AsProperty() != NULL) {
- // Call to an object property.
- Property* prop = fun->AsProperty();
- Literal* key = prop->key()->AsLiteral();
- if (key != NULL && key->handle()->IsSymbol()) {
- // Call to a named property, use call IC.
- Visit(prop->obj());
- EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
- } else {
- // Call to a keyed property, use keyed load IC followed by function
- // call.
- Visit(prop->obj());
- Visit(prop->key());
- // Record source code position for IC call.
- SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // By emitting a nop we make sure that we do not have a "test eax,..."
- // instruction after the call it is treated specially by the LoadIC code.
- __ nop();
- // Drop key left on the stack by IC.
- __ Drop(1);
- // Pop receiver.
- __ pop(ebx);
- // Push result (function).
- __ push(eax);
- // Push receiver object on stack.
- if (prop->is_synthetic()) {
- __ mov(ecx, CodeGenerator::GlobalObject());
- __ push(FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
- } else {
- __ push(ebx);
- }
- EmitCallWithStub(expr);
- }
- } else {
- // Call to some other expression. If the expression is an anonymous
- // function literal not called in a loop, mark it as one that should
- // also use the fast code generator.
- FunctionLiteral* lit = fun->AsFunctionLiteral();
- if (lit != NULL &&
- lit->name()->Equals(Heap::empty_string()) &&
- loop_depth() == 0) {
- lit->set_try_fast_codegen(true);
- }
- Visit(fun);
- // Load global receiver object.
- __ mov(ebx, CodeGenerator::GlobalObject());
- __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
- // Emit function call.
- EmitCallWithStub(expr);
- }
-}
-
-
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
- // Push function on the stack.
- Visit(expr->expression());
- ASSERT_EQ(Expression::kValue, expr->expression()->context());
-
- // Push global object (receiver).
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
+ // Compile global variable accesses as load IC calls. The only live
+ // registers are esi (context) and possibly edx (this). Both are also
+ // saved on the stack, and esi is preserved by the call.
__ push(CodeGenerator::GlobalObject());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
- // If location is value, it is already on the stack,
- // so nothing to do here.
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetSourcePosition(expr->position());
-
- // Load function, arg_count into edi and eax.
- __ Set(eax, Immediate(arg_count));
- // Function is in esp[arg_count + 1].
- __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
-
- Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
- __ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-
- // Replace function on TOS with result in eax, or pop it.
- DropAndApply(1, expr->context(), eax);
-}
-
-
-void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Comment cmnt(masm_, "[ CallRuntime");
- ZoneList<Expression*>* args = expr->arguments();
-
- if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ mov(eax, CodeGenerator::GlobalObject());
- __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
- }
-
- if (expr->is_jsruntime()) {
- // Call the JS runtime function via a call IC.
- __ Set(ecx, Immediate(expr->name()));
- InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
- __ call(ic, RelocInfo::CODE_TARGET);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(ecx, name);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ if (has_this_properties()) {
+ // Restore this.
+ EmitLoadReceiver(edx);
} else {
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
+    // The nop (rather than a test eax instruction) signals to the IC that
+    // there is no inlined code at the call site for it to patch.
+    __ nop();
}
- Apply(expr->context(), eax);
}
-void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- Visit(expr->expression());
- ASSERT_EQ(Expression::kEffect, expr->expression()->context());
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- break;
- case Expression::kValue:
- __ push(Immediate(Factory::undefined_value()));
- break;
- case Expression::kTestValue:
- // Value is false so it's needed.
- __ push(Immediate(Factory::undefined_value()));
- // Fall through.
- case Expression::kTest:
- case Expression::kValueTest:
- __ jmp(false_label_);
- break;
- }
- break;
- }
+void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+ LookupResult lookup;
+ receiver()->Lookup(*name, &lookup);
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- ASSERT_EQ(Expression::kTest, expr->expression()->context());
+ ASSERT(lookup.holder() == *receiver());
+ ASSERT(lookup.type() == FIELD);
+ Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+ int index = lookup.GetFieldIndex() - map->inobject_properties();
+ int offset = index * kPointerSize;
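+  // A negative index denotes a field stored inside the object itself; a
+  // non-negative index refers to the out-of-object properties array.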
- Label push_true, push_false, done;
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
-
- case Expression::kEffect:
- VisitForControl(expr->expression(), &done, &done);
- __ bind(&done);
- break;
-
- case Expression::kValue:
- VisitForControl(expr->expression(), &push_false, &push_true);
- __ bind(&push_true);
- __ push(Immediate(Factory::true_value()));
- __ jmp(&done);
- __ bind(&push_false);
- __ push(Immediate(Factory::false_value()));
- __ bind(&done);
- break;
-
- case Expression::kTest:
- VisitForControl(expr->expression(), false_label_, true_label_);
- break;
-
- case Expression::kValueTest:
- VisitForControl(expr->expression(), false_label_, &push_true);
- __ bind(&push_true);
- __ push(Immediate(Factory::true_value()));
- __ jmp(true_label_);
- break;
-
- case Expression::kTestValue:
- VisitForControl(expr->expression(), &push_false, true_label_);
- __ bind(&push_false);
- __ push(Immediate(Factory::false_value()));
- __ jmp(false_label_);
- break;
- }
- break;
- }
-
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- ASSERT_EQ(Expression::kValue, expr->expression()->context());
-
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy != NULL &&
- !proxy->var()->is_this() &&
- proxy->var()->is_global()) {
- Comment cmnt(masm_, "Global variable");
- __ push(CodeGenerator::GlobalObject());
- __ mov(ecx, Immediate(proxy->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- __ call(ic, RelocInfo::CODE_TARGET);
- __ mov(Operand(esp, 0), eax);
- } else if (proxy != NULL &&
- proxy->var()->slot() != NULL &&
- proxy->var()->slot()->type() == Slot::LOOKUP) {
- __ push(esi);
- __ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- __ push(eax);
- } else {
- // This expression cannot throw a reference error at the top level.
- Visit(expr->expression());
- }
-
- __ CallRuntime(Runtime::kTypeof, 1);
- Apply(expr->context(), eax);
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
- Comment cmnt(masm_, "[ CountOperation");
-
- // Expression can only be a property, a global or a (parameter or local)
-  // slot. Variables that rewrite to .arguments are treated as KEYED_PROPERTY.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->expression()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type = (prop->key()->context() == Expression::kUninitialized)
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
- Expression::kValue);
- } else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && expr->context() != Expression::kEffect) {
- ASSERT(expr->context() != Expression::kUninitialized);
- __ push(Immediate(Smi::FromInt(0)));
- }
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- if (assign_type == NAMED_PROPERTY) {
- EmitNamedPropertyLoad(prop, Expression::kValue);
- } else {
- Visit(prop->key());
- ASSERT_EQ(Expression::kValue, prop->key()->context());
- EmitKeyedPropertyLoad(prop, Expression::kValue);
- }
- }
-
- // Convert to number.
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Do not save result.
- break;
- case Expression::kValue:
- case Expression::kTest:
- case Expression::kTestValue:
- case Expression::kValueTest:
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(eax);
- break;
- case NAMED_PROPERTY:
- __ mov(Operand(esp, kPointerSize), eax);
- break;
- case KEYED_PROPERTY:
- __ mov(Operand(esp, 2 * kPointerSize), eax);
- break;
- }
- break;
- }
- }
-
- // Call runtime for +1/-1.
- __ push(eax);
- __ push(Immediate(Smi::FromInt(1)));
- if (expr->op() == Token::INC) {
- __ CallRuntime(Runtime::kNumberAdd, 2);
+ // Negative offsets are inobject properties.
+ if (offset < 0) {
+ offset += map->instance_size();
+ __ mov(ecx, edx); // Copy receiver for write barrier.
} else {
- __ CallRuntime(Runtime::kNumberSub, 2);
- }
-
- // Store the value returned in eax.
- switch (assign_type) {
- case VARIABLE:
- __ push(eax);
- if (expr->is_postfix()) {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Expression::kEffect);
- // For all contexts except kEffect: We have the result on
- // top of the stack.
- if (expr->context() != Expression::kEffect) {
- ApplyTOS(expr->context());
- }
- } else {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- expr->context());
- }
- break;
- case NAMED_PROPERTY: {
- __ mov(ecx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // This nop signals to the IC that there is no inlined code at the call
- // site for it to patch.
- __ nop();
- if (expr->is_postfix()) {
- __ Drop(1); // Result is on the stack under the receiver.
- if (expr->context() != Expression::kEffect) {
- ApplyTOS(expr->context());
- }
- } else {
- DropAndApply(1, expr->context(), eax);
- }
- break;
- }
- case KEYED_PROPERTY: {
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // This nop signals to the IC that there is no inlined code at the call
- // site for it to patch.
- __ nop();
- if (expr->is_postfix()) {
- __ Drop(2); // Result is on the stack under the key and the receiver.
- if (expr->context() != Expression::kEffect) {
- ApplyTOS(expr->context());
- }
- } else {
- DropAndApply(2, expr->context(), eax);
- }
- break;
- }
- }
-}
-
-
-void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- Comment cmnt(masm_, "[ BinaryOperation");
- switch (expr->op()) {
- case Token::COMMA:
- ASSERT_EQ(Expression::kEffect, expr->left()->context());
- ASSERT_EQ(expr->context(), expr->right()->context());
- Visit(expr->left());
- Visit(expr->right());
- break;
-
- case Token::OR:
- case Token::AND:
- EmitLogicalOperation(expr);
- break;
-
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD:
- case Token::MUL:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- ASSERT_EQ(Expression::kValue, expr->left()->context());
- ASSERT_EQ(Expression::kValue, expr->right()->context());
-
- Visit(expr->left());
- Visit(expr->right());
- GenericBinaryOpStub stub(expr->op(),
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS);
- __ CallStub(&stub);
- Apply(expr->context(), eax);
-
- break;
- }
- default:
- UNREACHABLE();
+ offset += FixedArray::kHeaderSize;
+ __ mov(ecx, FieldOperand(edx, JSObject::kPropertiesOffset));
}
+ // Perform the store.
+ __ mov(FieldOperand(ecx, offset), eax);
+  // RecordWrite clobbers its register arguments, so pass a copy of the
+  // value in ebx and keep the original safe in eax.
+ __ mov(ebx, eax);
+ __ RecordWrite(ecx, offset, ebx, edi);
}
-void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
- ASSERT_EQ(Expression::kValue, expr->left()->context());
- ASSERT_EQ(Expression::kValue, expr->right()->context());
- Visit(expr->left());
- Visit(expr->right());
-
- // Always perform the comparison for its control flow. Pack the result
- // into the expression's context after the comparison is performed.
- Label push_true, push_false, done;
- // Initially assume we are in a test context.
- Label* if_true = true_label_;
- Label* if_false = false_label_;
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- if_true = &done;
- if_false = &done;
- break;
- case Expression::kValue:
- if_true = &push_true;
- if_false = &push_false;
- break;
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- if_true = &push_true;
- break;
- case Expression::kTestValue:
- if_false = &push_false;
- break;
- }
-
- switch (expr->op()) {
- case Token::IN: {
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- __ cmp(eax, Factory::true_value());
- __ j(equal, if_true);
- __ jmp(if_false);
- break;
- }
-
- case Token::INSTANCEOF: {
- InstanceofStub stub;
- __ CallStub(&stub);
- __ test(eax, Operand(eax));
- __ j(zero, if_true); // The stub returns 0 for true.
- __ jmp(if_false);
- break;
- }
-
- default: {
- Condition cc = no_condition;
- bool strict = false;
- switch (expr->op()) {
- case Token::EQ_STRICT:
- strict = true;
- // Fall through
- case Token::EQ:
- cc = equal;
- __ pop(eax);
- __ pop(edx);
- break;
- case Token::LT:
- cc = less;
- __ pop(eax);
- __ pop(edx);
- break;
- case Token::GT:
-        // Reverse left and right sides to obtain ECMA-262 conversion order.
- cc = less;
- __ pop(edx);
- __ pop(eax);
- break;
- case Token::LTE:
-        // Reverse left and right sides to obtain ECMA-262 conversion order.
- cc = greater_equal;
- __ pop(edx);
- __ pop(eax);
- break;
- case Token::GTE:
- cc = greater_equal;
- __ pop(eax);
- __ pop(edx);
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
-
- // The comparison stub expects the smi vs. smi case to be handled
- // before it is called.
- Label slow_case;
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow_case, not_taken);
- __ cmp(edx, Operand(eax));
- __ j(cc, if_true);
- __ jmp(if_false);
-
- __ bind(&slow_case);
- CompareStub stub(cc, strict);
- __ CallStub(&stub);
- __ test(eax, Operand(eax));
- __ j(cc, if_true);
- __ jmp(if_false);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
-
- case Expression::kEffect:
- __ bind(&done);
- break;
-
- case Expression::kValue:
- __ bind(&push_true);
- __ push(Immediate(Factory::true_value()));
- __ jmp(&done);
- __ bind(&push_false);
- __ push(Immediate(Factory::false_value()));
- __ bind(&done);
- break;
-
- case Expression::kTest:
- break;
-
- case Expression::kValueTest:
- __ bind(&push_true);
- __ push(Immediate(Factory::true_value()));
- __ jmp(true_label_);
- break;
-
- case Expression::kTestValue:
- __ bind(&push_false);
- __ push(Immediate(Factory::false_value()));
- __ jmp(false_label_);
- break;
- }
-}
-
-
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- Apply(expr->context(), eax);
-}
-
-
-Register FastCodeGenerator::result_register() { return eax; }
-
-
-Register FastCodeGenerator::context_register() { return esi; }
-
-
-void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
- __ mov(Operand(ebp, frame_offset), value);
-}
-
+void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
+ ASSERT(function_ == NULL);
+ ASSERT(info_ == NULL);
+ function_ = fun;
+ info_ = info;
-void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ mov(dst, CodeGenerator::ContextOperand(esi, context_index));
-}
+ // Save the caller's frame pointer and set up our own.
+ Comment prologue_cmnt(masm(), ";; Prologue");
+ __ push(ebp);
+ __ mov(ebp, esp);
+ __ push(esi); // Context.
+ __ push(edi); // Closure.
+ // Note that we keep a live register reference to esi (context) at this
+ // point.
+ // Receiver (this) is allocated to edx if there are this properties.
+ if (has_this_properties()) EmitReceiverMapCheck();
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
+ VisitStatements(fun->body());
-void FastCodeGenerator::EnterFinallyBlock() {
- // Cook return address on top of stack (smi encoded Code* delta)
- ASSERT(!result_register().is(edx));
- __ mov(edx, Operand(esp, 0));
- __ sub(Operand(edx), Immediate(masm_->CodeObject()));
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- ASSERT_EQ(0, kSmiTag);
- __ add(edx, Operand(edx)); // Convert to smi.
- __ mov(Operand(esp, 0), edx);
- // Store result register while executing finally block.
- __ push(result_register());
-}
+ Comment return_cmnt(masm(), ";; Return(<undefined>)");
+ __ mov(eax, Factory::undefined_value());
+ Comment epilogue_cmnt(masm(), ";; Epilogue");
+ __ mov(esp, ebp);
+ __ pop(ebp);
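+  // The + 1 in the return count also pops the receiver below the parameters.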
+ __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
-void FastCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(edx));
- // Restore result register from stack.
- __ pop(result_register());
- // Uncook return address.
- __ mov(edx, Operand(esp, 0));
- __ sar(edx, 1); // Convert smi to int.
- __ add(Operand(edx), Immediate(masm_->CodeObject()));
- __ mov(Operand(esp, 0), edx);
- // And return.
- __ ret(0);
+ __ bind(&bailout_);
}
#undef __
+
} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
new file mode 100644
index 000000000..9f9ac56cc
--- /dev/null
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -0,0 +1,1900 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "compiler.h"
+#include "debug.h"
+#include "full-codegen.h"
+#include "parser.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right, with the
+// return address on top of them. The actual argument count matches the
+// formal parameter count expected by the function.
+//
+// The live registers are:
+// o edi: the JS function object being called (ie, ourselves)
+// o esi: our context
+// o ebp: our caller's frame pointer
+// o esp: stack pointer (pointing to return address)
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-ia32.h for its layout.
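+//
+// A sketch of that frame after the PRIMARY prologue below (offsets from ebp):
+//   ebp + 8 + 4 * num_parameters           : receiver
+//   ebp + 8 + 4 * (num_parameters - 1 - i) : parameter i
+//   ebp + 4                                : return address
+//   ebp + 0                                : caller's saved ebp
+//   ebp - 4                                : context (esi)
+//   ebp - 8                                : the JS function (edi)
+//   below                                  : stack locals, initially undefined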
+void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
+ function_ = fun;
+ SetFunctionPosition(fun);
+
+ if (mode == PRIMARY) {
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ __ push(edi); // Callee's JS Function.
+
+ { Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = fun->scope()->num_stack_slots();
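+      // For a single local, push the immediate directly; for several,
+      // load undefined into eax once and push it repeatedly.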
+ if (locals_count == 1) {
+ __ push(Immediate(Factory::undefined_value()));
+ } else if (locals_count > 1) {
+ __ mov(eax, Immediate(Factory::undefined_value()));
+ for (int i = 0; i < locals_count; i++) {
+ __ push(eax);
+ }
+ }
+ }
+
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ if (fun->scope()->num_heap_slots() > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is still in edi.
+ __ push(edi);
+ __ CallRuntime(Runtime::kNewContext, 1);
+ function_in_register = false;
+ // Context is returned in both eax and esi. It replaces the context
+    // passed to us. It's saved on the stack and kept live in esi.
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+
+ // Copy parameters into context if necessary.
+ int num_parameters = fun->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = fun->scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ mov(eax, Operand(ebp, parameter_offset));
+ // Store it in the context.
+ int context_offset = Context::SlotOffset(slot->index());
+ __ mov(Operand(esi, context_offset), eax);
+ // Update the write barrier. This clobbers all involved
+        // registers, so we have to use a third register to avoid
+ // clobbering esi.
+ __ mov(ecx, esi);
+ __ RecordWrite(ecx, context_offset, eax, ebx);
+ }
+ }
+ }
+
+ Variable* arguments = fun->scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (function_in_register) {
+ __ push(edi);
+ } else {
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ __ lea(edx, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
+ fun->num_parameters() * kPointerSize));
+ __ push(edx);
+ __ push(Immediate(Smi::FromInt(fun->num_parameters())));
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ __ mov(ecx, eax); // Duplicate result.
+ Move(arguments->slot(), eax, ebx, edx);
+ Slot* dot_arguments_slot =
+ fun->scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, ecx, ebx, edx);
+ }
+ }
+
+ { Comment cmnt(masm_, "[ Declarations");
+ VisitDeclarations(fun->scope()->declarations());
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, taken);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ bind(&ok);
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(fun->body());
+ ASSERT(loop_depth() == 0);
+ }
+
+ { Comment cmnt(masm_, "[ return <undefined>;");
+ // Emit a 'return undefined' in case control fell off the end of the body.
+ __ mov(eax, Factory::undefined_value());
+ EmitReturnSequence(function_->end_position());
+ }
+}
+
+
+void FullCodeGenerator::EmitReturnSequence(int position) {
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ jmp(&return_label_);
+ } else {
+ // Common return label
+ __ bind(&return_label_);
+ if (FLAG_trace) {
+ __ push(eax);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+#ifdef DEBUG
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+#endif
+ CodeGenerator::RecordPositions(masm_, position);
+ __ RecordJSReturn();
+ // Do not use the leave instruction here because it is too short to
+ // patch with the code required by the debugger.
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger.
+ ASSERT_EQ(Assembler::kJSReturnSequenceLength,
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+#endif
+ }
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ // Nothing to do.
+ break;
+
+ case Expression::kValue:
+ // Move value into place.
+ switch (location_) {
+ case kAccumulator:
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ break;
+ case kStack:
+ __ push(reg);
+ break;
+ }
+ break;
+
+ case Expression::kTest:
+ // For simplicity we always test the accumulator register.
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Nothing to do.
+ break;
+ case Expression::kValue: {
+ MemOperand slot_operand = EmitSlotSearch(slot, result_register());
+ switch (location_) {
+ case kAccumulator:
+ __ mov(result_register(), slot_operand);
+ break;
+ case kStack:
+ // Memory operands can be pushed directly.
+ __ push(slot_operand);
+ break;
+ }
+ break;
+ }
+
+ case Expression::kTest:
+ // For simplicity we always test the accumulator register.
+ Move(result_register(), slot);
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ Move(result_register(), slot);
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Nothing to do.
+ break;
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ mov(result_register(), lit->handle());
+ break;
+ case kStack:
+ // Immediates can be pushed directly.
+ __ push(Immediate(lit->handle()));
+ break;
+ }
+ break;
+
+ case Expression::kTest:
+ // For simplicity we always test the accumulator register.
+ __ mov(result_register(), lit->handle());
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ __ mov(result_register(), lit->handle());
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::ApplyTOS(Expression::Context context) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ __ Drop(1);
+ break;
+
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ pop(result_register());
+ break;
+ case kStack:
+ break;
+ }
+ break;
+
+ case Expression::kTest:
+ // For simplicity we always test the accumulator register.
+ __ pop(result_register());
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ __ pop(result_register());
+ break;
+ case kStack:
+ __ mov(result_register(), Operand(esp, 0));
+ break;
+ }
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::DropAndApply(int count,
+ Expression::Context context,
+ Register reg) {
+ ASSERT(count > 0);
+ ASSERT(!reg.is(esp));
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ __ Drop(count);
+ break;
+
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ Drop(count);
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ break;
+ case kStack:
+ if (count > 1) __ Drop(count - 1);
+ __ mov(Operand(esp, 0), reg);
+ break;
+ }
+ break;
+
+ case Expression::kTest:
+ // For simplicity we always test the accumulator register.
+ __ Drop(count);
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ __ Drop(count);
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ break;
+ case kStack:
+ if (count > 1) __ Drop(count - 1);
+ __ mov(result_register(), reg);
+ __ mov(Operand(esp, 0), result_register());
+ break;
+ }
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context,
+ Label* materialize_true,
+ Label* materialize_false) {
+ switch (context) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+
+ case Expression::kEffect:
+ ASSERT_EQ(materialize_true, materialize_false);
+ __ bind(materialize_true);
+ break;
+
+ case Expression::kValue: {
+ Label done;
+ switch (location_) {
+ case kAccumulator:
+ __ bind(materialize_true);
+ __ mov(result_register(), Factory::true_value());
+ __ jmp(&done);
+ __ bind(materialize_false);
+ __ mov(result_register(), Factory::false_value());
+ break;
+ case kStack:
+ __ bind(materialize_true);
+ __ push(Immediate(Factory::true_value()));
+ __ jmp(&done);
+ __ bind(materialize_false);
+ __ push(Immediate(Factory::false_value()));
+ break;
+ }
+ __ bind(&done);
+ break;
+ }
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ __ bind(materialize_true);
+ switch (location_) {
+ case kAccumulator:
+ __ mov(result_register(), Factory::true_value());
+ break;
+ case kStack:
+ __ push(Immediate(Factory::true_value()));
+ break;
+ }
+ __ jmp(true_label_);
+ break;
+
+ case Expression::kTestValue:
+ __ bind(materialize_false);
+ switch (location_) {
+ case kAccumulator:
+ __ mov(result_register(), Factory::false_value());
+ break;
+ case kStack:
+ __ push(Immediate(Factory::false_value()));
+ break;
+ }
+ __ jmp(false_label_);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::DoTest(Expression::Context context) {
+ // The value to test is in the accumulator. If the value might be needed
+ // on the stack (value/test and test/value contexts with a stack location
+ // desired), then the value is already duplicated on the stack.
+ ASSERT_NE(NULL, true_label_);
+ ASSERT_NE(NULL, false_label_);
+
+ // In value/test and test/value expression contexts with stack as the
+ // desired location, there is already an extra value on the stack. Use a
+ // label to discard it if unneeded.
+ Label discard;
+ Label* if_true = true_label_;
+ Label* if_false = false_label_;
+ switch (context) {
+ case Expression::kUninitialized:
+ case Expression::kEffect:
+ case Expression::kValue:
+ UNREACHABLE();
+ case Expression::kTest:
+ break;
+ case Expression::kValueTest:
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ if_false = &discard;
+ break;
+ }
+ break;
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ if_true = &discard;
+ break;
+ }
+ break;
+ }
+
+ // Emit the inlined tests assumed by the stub.
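+  // The quick checks below: undefined -> false, true -> true,
+  // false -> false, smi zero -> false, any other smi -> true.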
+ __ cmp(result_register(), Factory::undefined_value());
+ __ j(equal, if_false);
+ __ cmp(result_register(), Factory::true_value());
+ __ j(equal, if_true);
+ __ cmp(result_register(), Factory::false_value());
+ __ j(equal, if_false);
+ ASSERT_EQ(0, kSmiTag);
+ __ test(result_register(), Operand(result_register()));
+ __ j(zero, if_false);
+ __ test(result_register(), Immediate(kSmiTagMask));
+ __ j(zero, if_true);
+
+ // Save a copy of the value if it may be needed and isn't already saved.
+ switch (context) {
+ case Expression::kUninitialized:
+ case Expression::kEffect:
+ case Expression::kValue:
+ UNREACHABLE();
+ case Expression::kTest:
+ break;
+ case Expression::kValueTest:
+ switch (location_) {
+ case kAccumulator:
+ __ push(result_register());
+ break;
+ case kStack:
+ break;
+ }
+ break;
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ __ push(result_register());
+ break;
+ case kStack:
+ break;
+ }
+ break;
+ }
+
+ // Call the ToBoolean stub for all other cases.
+ ToBooleanStub stub;
+ __ push(result_register());
+ __ CallStub(&stub);
+ __ test(eax, Operand(eax));
+
+ // The stub returns nonzero for true. Complete based on the context.
+ switch (context) {
+ case Expression::kUninitialized:
+ case Expression::kEffect:
+ case Expression::kValue:
+ UNREACHABLE();
+
+ case Expression::kTest:
+ __ j(not_zero, true_label_);
+ __ jmp(false_label_);
+ break;
+
+ case Expression::kValueTest:
+ switch (location_) {
+ case kAccumulator:
+ __ j(zero, &discard);
+ __ pop(result_register());
+ __ jmp(true_label_);
+ break;
+ case kStack:
+ __ j(not_zero, true_label_);
+ break;
+ }
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(false_label_);
+ break;
+
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ __ j(not_zero, &discard);
+ __ pop(result_register());
+ __ jmp(false_label_);
+ break;
+ case kStack:
+ __ j(zero, false_label_);
+ break;
+ }
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(true_label_);
+ break;
+ }
+}
+
+
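+// Returns an operand for the given slot. For CONTEXT slots the operand is
+// based on the scratch register, which must remain live until the operand
+// has been used.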
+MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return Operand(ebp, SlotOffset(slot));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(slot->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return CodeGenerator::ContextOperand(scratch, slot->index());
+ }
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return Operand(eax, 0);
+}
+
+
+void FullCodeGenerator::Move(Register destination, Slot* source) {
+ MemOperand location = EmitSlotSearch(source, destination);
+ __ mov(destination, location);
+}
+
+
+void FullCodeGenerator::Move(Slot* dst,
+ Register src,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
+ ASSERT(!scratch1.is(src) && !scratch2.is(src));
+ MemOperand location = EmitSlotSearch(dst, scratch1);
+ __ mov(location, src);
+ // Emit the write barrier code if the location is in the heap.
+ if (dst->type() == Slot::CONTEXT) {
+ int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
+ __ RecordWrite(scratch1, offset, src, scratch2);
+ }
+}
+
+
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+ Comment cmnt(masm_, "[ Declaration");
+ Variable* var = decl->proxy()->var();
+ ASSERT(var != NULL); // Must have been resolved.
+ Slot* slot = var->slot();
+ Property* prop = var->AsProperty();
+
+ if (slot != NULL) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ if (decl->mode() == Variable::CONST) {
+ __ mov(Operand(ebp, SlotOffset(slot)),
+ Immediate(Factory::the_hole_value()));
+ } else if (decl->fun() != NULL) {
+ VisitForValue(decl->fun(), kAccumulator);
+ __ mov(Operand(ebp, SlotOffset(slot)), result_register());
+ }
+ break;
+
+ case Slot::CONTEXT:
+ // We bypass the general EmitSlotSearch because we know more about
+ // this specific context.
+
+ // The variable in the decl always resides in the current context.
+ ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+ if (FLAG_debug_code) {
+ // Check if we have the correct context pointer.
+ __ mov(ebx,
+ CodeGenerator::ContextOperand(esi, Context::FCONTEXT_INDEX));
+ __ cmp(ebx, Operand(esi));
+ __ Check(equal, "Unexpected declaration in current context.");
+ }
+ if (decl->mode() == Variable::CONST) {
+ __ mov(eax, Immediate(Factory::the_hole_value()));
+ __ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
+ // No write barrier since the hole value is in old space.
+ } else if (decl->fun() != NULL) {
+ VisitForValue(decl->fun(), kAccumulator);
+ __ mov(CodeGenerator::ContextOperand(esi, slot->index()),
+ result_register());
+ int offset = Context::SlotOffset(slot->index());
+ __ mov(ebx, esi);
+ __ RecordWrite(ebx, offset, result_register(), ecx);
+ }
+ break;
+
+ case Slot::LOOKUP: {
+ __ push(esi);
+ __ push(Immediate(var->name()));
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(decl->mode() == Variable::VAR ||
+ decl->mode() == Variable::CONST);
+ PropertyAttributes attr =
+ (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+ __ push(Immediate(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
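+        // (For example, 'var x = 1; var x;' must not reset x to undefined.)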
+ if (decl->mode() == Variable::CONST) {
+ __ push(Immediate(Factory::the_hole_value()));
+ } else if (decl->fun() != NULL) {
+ VisitForValue(decl->fun(), kStack);
+ } else {
+ __ push(Immediate(Smi::FromInt(0))); // No initial value!
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+
+ } else if (prop != NULL) {
+ if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+ // We are declaring a function or constant that rewrites to a
+ // property. Use (keyed) IC to set the initial value.
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+
+ if (decl->fun() != NULL) {
+ VisitForValue(decl->fun(), kAccumulator);
+ } else {
+ __ mov(result_register(), Factory::the_hole_value());
+ }
+
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Absence of a test eax instruction following the call
+      // indicates that none of the store was inlined.
+ __ nop();
+
+ // Value in eax is ignored (declarations are statements). Receiver
+ // and key on stack are discarded.
+ __ Drop(2);
+ }
+ }
+}
+
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals.
+ __ push(esi); // The context is the first argument.
+ __ push(Immediate(pairs));
+ __ push(Immediate(Smi::FromInt(is_eval_ ? 1 : 0)));
+ __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+ Comment cmnt(masm_, "[ FunctionLiteral");
+
+ // Build the function boilerplate and instantiate it.
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(expr, script_, this);
+ if (HasStackOverflow()) return;
+
+ ASSERT(boilerplate->IsBoilerplate());
+
+ // Create a new closure.
+ __ push(esi);
+ __ push(Immediate(boilerplate));
+ __ CallRuntime(Runtime::kNewClosure, 2);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ EmitVariableLoad(expr->var(), context_);
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(Variable* var,
+ Expression::Context context) {
+ // Four cases: non-this global variables, lookup slots, all other
+ // types of slots, and parameters that rewrite to explicit property
+ // accesses on the arguments object.
+ Slot* slot = var->slot();
+ Property* property = var->AsProperty();
+
+ if (var->is_global() && !var->is_this()) {
+ Comment cmnt(masm_, "Global variable");
+ // Use inline caching. Variable name is passed in ecx and the global
+ // object on the stack.
+ __ push(CodeGenerator::GlobalObject());
+ __ mov(ecx, var->name());
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ // By emitting a nop we make sure that we do not have a test eax
+    // instruction after the call, as it is treated specially by the LoadIC
+    // code. Remember that the assembler may choose to do peephole
+    // optimization (eg, push/pop elimination).
+ __ nop();
+ DropAndApply(1, context, eax);
+
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ Comment cmnt(masm_, "Lookup slot");
+ __ push(esi); // Context.
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ Apply(context, eax);
+
+ } else if (slot != NULL) {
+ Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
+ ? "Context slot"
+ : "Stack slot");
+ Apply(context, slot);
+
+ } else {
+ Comment cmnt(masm_, "Rewritten parameter");
+ ASSERT_NOT_NULL(property);
+ // Rewritten parameter accesses are of the form "slot[literal]".
+
+ // Assert that the object is in a slot.
+ Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(object_var);
+ Slot* object_slot = object_var->slot();
+ ASSERT_NOT_NULL(object_slot);
+
+ // Load the object.
+ MemOperand object_loc = EmitSlotSearch(object_slot, eax);
+ __ push(object_loc);
+
+ // Assert that the key is a smi.
+ Literal* key_literal = property->key()->AsLiteral();
+ ASSERT_NOT_NULL(key_literal);
+ ASSERT(key_literal->handle()->IsSmi());
+
+ // Load the key.
+ __ push(Immediate(key_literal->handle()));
+
+ // Do a keyed property load.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Notice: We must not have a "test eax, ..." instruction after the
+ // call. It is treated specially by the LoadIC code.
+ __ nop();
+ // Drop key and object left on the stack by IC.
+ DropAndApply(2, context, eax);
+ }
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ Label done;
+ // Registers will be used as follows:
+ // edi = JS function.
+ // ebx = literals array.
+ // eax = regexp literal.
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(ebx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ mov(eax, FieldOperand(ebx, literal_offset));
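+  // A literal that has already been materialized is cached in the
+  // literals array; undefined means it has not been created yet.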
+ __ cmp(eax, Factory::undefined_value());
+ __ j(not_equal, &done);
+ // Create regexp literal using runtime function
+ // Result will be in eax.
+ __ push(ebx);
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(expr->pattern()));
+ __ push(Immediate(expr->flags()));
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ // Label done:
+ __ bind(&done);
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(expr->constant_properties()));
+ if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
+ } else {
+ __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
+ }
+
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in eax.
+ bool result_saved = false;
+
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(eax); // Save result on the stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->handle()->IsSymbol()) {
+ VisitForValue(value, kAccumulator);
+ __ mov(ecx, Immediate(key->handle()));
+ __ mov(edx, Operand(esp, 0));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
+ break;
+ }
+ // Fall through.
+ case ObjectLiteral::Property::PROTOTYPE:
+ __ push(Operand(esp, 0)); // Duplicate receiver.
+ VisitForValue(key, kStack);
+ VisitForValue(value, kStack);
+ __ CallRuntime(Runtime::kSetProperty, 3);
+ break;
+ case ObjectLiteral::Property::SETTER:
+ case ObjectLiteral::Property::GETTER:
+ __ push(Operand(esp, 0)); // Duplicate receiver.
+ VisitForValue(key, kStack);
+ __ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
+ Smi::FromInt(1) :
+ Smi::FromInt(0)));
+ VisitForValue(value, kStack);
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ break;
+ default: UNREACHABLE();
+ }
+ }
+
+ if (result_saved) {
+ ApplyTOS(context_);
+ } else {
+ Apply(context_, eax);
+ }
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(expr->constant_elements()));
+ if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
+ } else {
+ __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ }
+
+ bool result_saved = false; // Is the result saved to the stack?
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ ZoneList<Expression*>* subexprs = expr->values();
+ for (int i = 0, len = subexprs->length(); i < len; i++) {
+ Expression* subexpr = subexprs->at(i);
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (subexpr->AsLiteral() != NULL ||
+ CompileTimeValue::IsCompileTimeValue(subexpr)) {
+ continue;
+ }
+
+ if (!result_saved) {
+ __ push(eax);
+ result_saved = true;
+ }
+ VisitForValue(subexpr, kAccumulator);
+
+ // Store the subexpression value in the array's elements.
+ __ mov(ebx, Operand(esp, 0)); // Copy of array literal.
+ __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ mov(FieldOperand(ebx, offset), result_register());
+
+ // Update the write barrier for the array store.
+ __ RecordWrite(ebx, offset, result_register(), ecx);
+ }
+
+ if (result_saved) {
+ ApplyTOS(context_);
+ } else {
+ Apply(context_, eax);
+ }
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ __ mov(ecx, Immediate(key->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
+}
+
+
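+// Expects the left operand on the stack and the right operand in the
+// accumulator; the right operand is pushed so the generic stub can take
+// both operands from the stack, leaving the result in eax.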
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+ Expression::Context context) {
+ __ push(result_register());
+ GenericBinaryOpStub stub(op,
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS);
+ __ CallStub(&stub);
+ Apply(context, eax);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+ Expression::Context context) {
+ // Three main cases: global variables, lookup slots, and all other
+ // types of slots. Left-hand-side parameters that rewrite to
+ // explicit property accesses do not reach here.
+ ASSERT(var != NULL);
+ ASSERT(var->is_global() || var->slot() != NULL);
+
+ Slot* slot = var->slot();
+ if (var->is_global()) {
+ ASSERT(!var->is_this());
+ // Assignment to a global variable. Use inline caching for the
+ // assignment. Right-hand-side value is passed in eax, variable name in
+ // ecx, and the global object on the stack.
+ __ mov(ecx, var->name());
+ __ mov(edx, CodeGenerator::GlobalObject());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
+ Apply(context, eax);
+
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ __ push(result_register()); // Value.
+ __ push(esi); // Context.
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ Apply(context, eax);
+
+ } else if (slot != NULL) {
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER:
+ __ mov(Operand(ebp, SlotOffset(slot)), result_register());
+ break;
+
+ case Slot::CONTEXT: {
+ MemOperand target = EmitSlotSearch(slot, ecx);
+ __ mov(target, result_register());
+
+ // RecordWrite may destroy all its register arguments.
+ __ mov(edx, result_register());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ RecordWrite(ecx, offset, edx, ebx);
+ break;
+ }
+
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ break;
+ }
+ Apply(context, result_register());
+
+ } else {
+ // Variables rewritten as properties are not treated as variables in
+ // assignments.
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
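+  // Adding a fast property may copy the object's backing store, so a run
+  // of n additions can cost O(n^2); dictionary (slow) mode makes each
+  // addition cheap, and the object reverts to fast properties afterwards.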
+ if (expr->starts_initialization_block()) {
+ __ push(result_register());
+ __ push(Operand(esp, kPointerSize)); // Receiver is now under value.
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
+ }
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ __ mov(ecx, prop->key()->AsLiteral()->handle());
+ if (expr->ends_initialization_block()) {
+ __ mov(edx, Operand(esp, 0));
+ } else {
+ __ pop(edx);
+ }
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(eax); // Result of assignment, saved even if not needed.
+ __ push(Operand(esp, kPointerSize)); // Receiver is under value.
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(eax);
+ DropAndApply(1, context_, eax);
+ } else {
+ Apply(context_, eax);
+ }
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a keyed store IC.
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ push(result_register());
+ // Receiver is now under the key and value.
+ __ push(Operand(esp, 2 * kPointerSize));
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
+ }
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(eax); // Result of assignment, saved even if not needed.
+ // Receiver is under the key and value.
+ __ push(Operand(esp, 2 * kPointerSize));
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(eax);
+ }
+
+ // Receiver and key are still on stack.
+ DropAndApply(2, context_, eax);
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+
+ // Evaluate the receiver.
+ VisitForValue(expr->obj(), kStack);
+
+ if (key->IsPropertyName()) {
+ EmitNamedPropertyLoad(expr);
+ // Drop receiver left on the stack by IC.
+ DropAndApply(1, context_, eax);
+ } else {
+ VisitForValue(expr->key(), kStack);
+ EmitKeyedPropertyLoad(expr);
+ // Drop key and receiver left on the stack by IC.
+ DropAndApply(2, context_, eax);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallWithIC(Call* expr,
+ Handle<Object> name,
+ RelocInfo::Mode mode) {
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+ __ Set(ecx, Immediate(name));
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
+ __ call(ic, mode);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+ // Code common for calls using the call stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+ __ CallStub(&stub);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ DropAndApply(1, context_, eax);
+}
+
+
+void FullCodeGenerator::VisitCall(Call* expr) {
+ Comment cmnt(masm_, "[ Call");
+ Expression* fun = expr->expression();
+ Variable* var = fun->AsVariableProxy()->AsVariable();
+
+ if (var != NULL && var->is_possibly_eval()) {
+ // Call to the identifier 'eval'.
+ UNREACHABLE();
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // Push global object as receiver for the call IC.
+ __ push(CodeGenerator::GlobalObject());
+ EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ // Call to a lookup slot.
+ UNREACHABLE();
+ } else if (fun->AsProperty() != NULL) {
+ // Call to an object property.
+ Property* prop = fun->AsProperty();
+ Literal* key = prop->key()->AsLiteral();
+ if (key != NULL && key->handle()->IsSymbol()) {
+ // Call to a named property, use call IC.
+ VisitForValue(prop->obj(), kStack);
+ EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
+ } else {
+ // Call to a keyed property, use keyed load IC followed by function
+ // call.
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // By emitting a nop we make sure that we do not have a "test eax,..."
+      // instruction after the call, as it is treated specially by the
+      // LoadIC code.
+ __ nop();
+ // Drop key left on the stack by IC.
+ __ Drop(1);
+ // Pop receiver.
+ __ pop(ebx);
+ // Push result (function).
+ __ push(eax);
+ // Push receiver object on stack.
+ if (prop->is_synthetic()) {
+ __ mov(ecx, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+ } else {
+ __ push(ebx);
+ }
+ EmitCallWithStub(expr);
+ }
+ } else {
+ // Call to some other expression. If the expression is an anonymous
+ // function literal not called in a loop, mark it as one that should
+ // also use the full code generator.
+ FunctionLiteral* lit = fun->AsFunctionLiteral();
+ if (lit != NULL &&
+ lit->name()->Equals(Heap::empty_string()) &&
+ loop_depth() == 0) {
+ lit->set_try_full_codegen(true);
+ }
+ VisitForValue(fun, kStack);
+ // Load global receiver object.
+ __ mov(ebx, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+ // Emit function call.
+ EmitCallWithStub(expr);
+ }
+}
+
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+ // Push function on the stack.
+ VisitForValue(expr->expression(), kStack);
+
+ // Push global object (receiver).
+ __ push(CodeGenerator::GlobalObject());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function, arg_count into edi and eax.
+ __ Set(eax, Immediate(arg_count));
+ // Function is in esp[arg_count + 1].
+ __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+
+ Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ __ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+ // Replace function on TOS with result in eax, or pop it.
+ DropAndApply(1, context_, eax);
+}
+
+
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ Comment cmnt(masm_, "[ CallRuntime");
+ ZoneList<Expression*>* args = expr->arguments();
+
+ if (expr->is_jsruntime()) {
+ // Prepare for calling JS runtime function.
+ __ mov(eax, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
+ }
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+
+ if (expr->is_jsruntime()) {
+ // Call the JS runtime function via a call IC.
+ __ Set(ecx, Immediate(expr->name()));
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ } else {
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ }
+ Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ VisitForEffect(expr->expression());
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ mov(result_register(), Factory::undefined_value());
+ break;
+ case kStack:
+ __ push(Immediate(Factory::undefined_value()));
+ break;
+ }
+ break;
+ case Expression::kTestValue:
+          // The value (undefined) is false, so it is needed on this path.
+ switch (location_) {
+ case kAccumulator:
+ __ mov(result_register(), Factory::undefined_value());
+ break;
+ case kStack:
+ __ push(Immediate(Factory::undefined_value()));
+ break;
+ }
+ // Fall through.
+ case Expression::kTest:
+ case Expression::kValueTest:
+ __ jmp(false_label_);
+ break;
+ }
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ Label materialize_true, materialize_false, done;
+ // Initially assume a pure test context. Notice that the labels are
+ // swapped.
+ Label* if_true = false_label_;
+ Label* if_false = true_label_;
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ if_true = &done;
+ if_false = &done;
+ break;
+ case Expression::kValue:
+ if_true = &materialize_false;
+ if_false = &materialize_true;
+ break;
+ case Expression::kTest:
+ break;
+ case Expression::kValueTest:
+ if_false = &materialize_true;
+ break;
+ case Expression::kTestValue:
+ if_true = &materialize_false;
+ break;
+ }
+ VisitForControl(expr->expression(), if_true, if_false);
+ Apply(context_, if_false, if_true); // Labels swapped.
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy != NULL &&
+ !proxy->var()->is_this() &&
+ proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ push(CodeGenerator::GlobalObject());
+ __ mov(ecx, Immediate(proxy->name()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ mov(Operand(esp, 0), eax);
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
+ __ push(esi);
+ __ push(Immediate(proxy->name()));
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ push(eax);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitForValue(expr->expression(), kStack);
+ }
+
+ __ CallRuntime(Runtime::kTypeof, 1);
+ Apply(context_, eax);
+ break;
+ }
+
+ case Token::ADD: {
+ Comment cmt(masm_, "[ UnaryOperation (ADD)");
+ VisitForValue(expr->expression(), kAccumulator);
+ Label no_conversion;
+ __ test(result_register(), Immediate(kSmiTagMask));
+ __ j(zero, &no_conversion);
+ __ push(result_register());
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ bind(&no_conversion);
+ Apply(context_, result_register());
+ break;
+ }
+
+ case Token::SUB: {
+ Comment cmt(masm_, "[ UnaryOperation (SUB)");
+ bool overwrite =
+ (expr->expression()->AsBinaryOperation() != NULL &&
+ expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericUnaryOpStub stub(Token::SUB, overwrite);
+ // GenericUnaryOpStub expects the argument to be in the
+ // accumulator register eax.
+ VisitForValue(expr->expression(), kAccumulator);
+ __ CallStub(&stub);
+ Apply(context_, eax);
+ break;
+ }
+
+ case Token::BIT_NOT: {
+ Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
+ bool overwrite =
+ (expr->expression()->AsBinaryOperation() != NULL &&
+ expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ // GenericUnaryOpStub expects the argument to be in the
+ // accumulator register eax.
+ VisitForValue(expr->expression(), kAccumulator);
+ // Avoid calling the stub for Smis.
+ Label smi, done;
+ __ test(result_register(), Immediate(kSmiTagMask));
+ __ j(zero, &smi);
+ // Non-smi: call stub leaving result in accumulator register.
+ __ CallStub(&stub);
+ __ jmp(&done);
+ // Perform operation directly on Smis.
+ __ bind(&smi);
+ __ not_(result_register());
+ __ and_(result_register(), ~kSmiTagMask); // Remove inverted smi-tag.
+ __ bind(&done);
+ Apply(context_, result_register());
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
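
The BIT_NOT smi shortcut relies on the ia32 smi encoding: a smi is the integer shifted left by one with tag bit 0, so ~(n << 1) equals (~n << 1) | 1, and masking with ~kSmiTagMask clears the stray tag bit and leaves the correctly tagged ~n. A minimal standalone check of that identity (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kSmiTagMask = 1;
      for (int32_t n = -1000; n <= 1000; n++) {
        uint32_t smi = static_cast<uint32_t>(n) << 1;  // tag: low bit is 0
        uint32_t result = ~smi & ~kSmiTagMask;         // the not_ + and_ pair
        assert(result == static_cast<uint32_t>(~n) << 1);  // tagged ~n
      }
      return 0;
    }
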
+
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ Comment cmnt(masm_, "[ CountOperation");
+
+ // Expression can only be a property, a global or a (parameter or local)
+ // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+ // In case of a property we use the uninitialized expression context
+ // of the key to detect a named property.
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ Location saved_location = location_;
+ location_ = kAccumulator;
+ EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
+ Expression::kValue);
+ location_ = saved_location;
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && context_ != Expression::kEffect) {
+ __ push(Immediate(Smi::FromInt(0)));
+ }
+ VisitForValue(prop->obj(), kStack);
+ if (assign_type == NAMED_PROPERTY) {
+ EmitNamedPropertyLoad(prop);
+ } else {
+ VisitForValue(prop->key(), kStack);
+ EmitKeyedPropertyLoad(prop);
+ }
+ }
+
+ // Call ToNumber only if operand is not a smi.
+ Label no_conversion;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &no_conversion);
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ bind(&no_conversion);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Do not save result.
+ break;
+ case Expression::kValue:
+ case Expression::kTest:
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(eax);
+ break;
+ case NAMED_PROPERTY:
+ __ mov(Operand(esp, kPointerSize), eax);
+ break;
+ case KEYED_PROPERTY:
+ __ mov(Operand(esp, 2 * kPointerSize), eax);
+ break;
+ }
+ break;
+ }
+ }
+
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ if (loop_depth() > 0) {
+ if (expr->op() == Token::INC) {
+ __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+ } else {
+ __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ }
+ __ j(overflow, &stub_call);
+ // We could eliminate this smi check if we split the code at
+ // the first smi check before calling ToNumber.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done);
+ __ bind(&stub_call);
+ // Call stub. Undo operation first.
+ if (expr->op() == Token::INC) {
+ __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ } else {
+ __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+ }
+ }
+ // Call stub for +1/-1.
+ GenericBinaryOpStub stub(expr->binary_op(),
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS);
+ stub.GenerateCall(masm(), eax, Smi::FromInt(1));
+ __ bind(&done);
+
+ // Store the value returned in eax.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Expression::kEffect);
+ // For all contexts except kEffect: We have the result on
+ // top of the stack.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ context_);
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ mov(ecx, prop->key()->AsLiteral()->handle());
+ __ pop(edx);
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+ if (expr->is_postfix()) {
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ Apply(context_, eax);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+ if (expr->is_postfix()) {
+ __ Drop(2); // Result is on the stack under the key and the receiver.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ DropAndApply(2, context_, eax);
+ }
+ break;
+ }
+ }
+}
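
The inline count operation above adds the tagged constant first and only repairs the damage when something goes wrong: on overflow (or a non-smi input) the addition is undone and the generic binary-op stub handles the slow case. A minimal standalone sketch of the add-then-undo pattern (plain C++, not V8 code; __builtin_add_overflow is a GCC/Clang builtin standing in for the j(overflow, ...) check):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t tagged = 0x7ffffffe;  // smi encoding of the largest smi
      int32_t smi_one = 2;          // Smi::FromInt(1): 1 << 1
      int32_t sum;
      if (__builtin_add_overflow(tagged, smi_one, &sum)) {
        sum = tagged;               // undo the add (the sub of Smi(1)) ...
        std::printf("overflow: fall back to the stub\n");  // ... then stub
      } else {
        std::printf("inline result: %d\n", sum >> 1);
      }
      return 0;
    }
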
+
+
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+ Comment cmnt(masm_, "[ BinaryOperation");
+ switch (expr->op()) {
+ case Token::COMMA:
+ VisitForEffect(expr->left());
+ Visit(expr->right());
+ break;
+
+ case Token::OR:
+ case Token::AND:
+ EmitLogicalOperation(expr);
+ break;
+
+ case Token::ADD:
+ case Token::SUB:
+ case Token::DIV:
+ case Token::MOD:
+ case Token::MUL:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+ VisitForValue(expr->left(), kStack);
+ VisitForValue(expr->right(), kAccumulator);
+ EmitBinaryOp(expr->op(), context_);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+ Label materialize_true, materialize_false, done;
+ // Initially assume we are in a test context.
+ Label* if_true = true_label_;
+ Label* if_false = false_label_;
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ if_true = &done;
+ if_false = &done;
+ break;
+ case Expression::kValue:
+ if_true = &materialize_true;
+ if_false = &materialize_false;
+ break;
+ case Expression::kTest:
+ break;
+ case Expression::kValueTest:
+ if_true = &materialize_true;
+ break;
+ case Expression::kTestValue:
+ if_false = &materialize_false;
+ break;
+ }
+
+ VisitForValue(expr->left(), kStack);
+ switch (expr->op()) {
+ case Token::IN:
+ VisitForValue(expr->right(), kStack);
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ __ cmp(eax, Factory::true_value());
+ __ j(equal, if_true);
+ __ jmp(if_false);
+ break;
+
+ case Token::INSTANCEOF: {
+ VisitForValue(expr->right(), kStack);
+ InstanceofStub stub;
+ __ CallStub(&stub);
+ __ test(eax, Operand(eax));
+ __ j(zero, if_true); // The stub returns 0 for true.
+ __ jmp(if_false);
+ break;
+ }
+
+ default: {
+ VisitForValue(expr->right(), kAccumulator);
+ Condition cc = no_condition;
+ bool strict = false;
+ switch (expr->op()) {
+ case Token::EQ_STRICT:
+ strict = true;
+ // Fall through
+ case Token::EQ:
+ cc = equal;
+ __ pop(edx);
+ break;
+ case Token::LT:
+ cc = less;
+ __ pop(edx);
+ break;
+ case Token::GT:
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = less;
+ __ mov(edx, result_register());
+ __ pop(eax);
+ break;
+ case Token::LTE:
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = greater_equal;
+ __ mov(edx, result_register());
+ __ pop(eax);
+ break;
+ case Token::GTE:
+ cc = greater_equal;
+ __ pop(edx);
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+
+ // The comparison stub expects the smi vs. smi case to be handled
+ // before it is called.
+ Label slow_case;
+ __ mov(ecx, Operand(edx));
+ __ or_(ecx, Operand(eax));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow_case, not_taken);
+ __ cmp(edx, Operand(eax));
+ __ j(cc, if_true);
+ __ jmp(if_false);
+
+ __ bind(&slow_case);
+ CompareStub stub(cc, strict);
+ __ CallStub(&stub);
+ __ test(eax, Operand(eax));
+ __ j(cc, if_true);
+ __ jmp(if_false);
+ }
+ }
+
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ Apply(context_, if_true, if_false);
+}
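
The slow-case guard above packs both smi checks into one test: smis carry tag bit 0 and heap object pointers tag bit 1, so or-ing the two words and testing the tag bit proves both operands are smis at once. A minimal standalone sketch (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    static bool BothSmis(uint32_t a, uint32_t b) {
      const uint32_t kSmiTagMask = 1;  // low bit: 0 for smis, 1 for pointers
      return ((a | b) & kSmiTagMask) == 0;
    }

    int main() {
      uint32_t smi_a = 42u << 1;            // tagged smi 42
      uint32_t smi_b = 7u << 1;             // tagged smi 7
      uint32_t heap_ref = (100u << 1) | 1;  // tagged heap object pointer
      assert(BothSmis(smi_a, smi_b));
      assert(!BothSmis(smi_a, heap_ref));
      return 0;
    }
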
+
+
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ Apply(context_, eax);
+}
+
+
+Register FullCodeGenerator::result_register() { return eax; }
+
+
+Register FullCodeGenerator::context_register() { return esi; }
+
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+ __ mov(Operand(ebp, frame_offset), value);
+}
+
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ mov(dst, CodeGenerator::ContextOperand(esi, context_index));
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FullCodeGenerator::EnterFinallyBlock() {
+ // Cook return address on top of stack (smi encoded Code* delta)
+ ASSERT(!result_register().is(edx));
+ __ mov(edx, Operand(esp, 0));
+ __ sub(Operand(edx), Immediate(masm_->CodeObject()));
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ ASSERT_EQ(0, kSmiTag);
+ __ add(edx, Operand(edx)); // Convert to smi.
+ __ mov(Operand(esp, 0), edx);
+ // Store result register while executing finally block.
+ __ push(result_register());
+}
+
+
+void FullCodeGenerator::ExitFinallyBlock() {
+ ASSERT(!result_register().is(edx));
+ // Restore result register from stack.
+ __ pop(result_register());
+ // Uncook return address.
+ __ mov(edx, Operand(esp, 0));
+ __ sar(edx, 1); // Convert smi to int.
+ __ add(Operand(edx), Immediate(masm_->CodeObject()));
+ __ mov(Operand(esp, 0), edx);
+ // And return.
+ __ ret(0);
+}
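
Cooking turns the absolute return address into a smi-tagged offset from the code object, so no raw code pointer sits on the stack while the finally block runs and a moving collector cannot leave it dangling; exiting reverses the transformation. A minimal standalone sketch of the round trip (plain C++, not V8 code; the addresses are made up):

    #include <cassert>
    #include <cstdint>

    int main() {
      uintptr_t code_object = 0x10000;  // start of the code object
      uintptr_t ret_addr = 0x10428;     // return address inside it
      // EnterFinallyBlock: subtract the base, double to smi-tag (tag 0).
      uintptr_t cooked = (ret_addr - code_object) << 1;
      // ExitFinallyBlock: shift right to untag, re-add the (possibly
      // relocated) code object base.
      uintptr_t uncooked = (cooked >> 1) + code_object;
      assert(uncooked == ret_addr);
      return 0;
    }
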
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 5658605aa..44dae3b4b 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -180,7 +180,6 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// -- esp[0] : return address
// -- esp[4] : receiver
// -----------------------------------
-
Label miss;
__ mov(eax, Operand(esp, kPointerSize));
@@ -197,7 +196,6 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
// -- esp[0] : return address
// -- esp[4] : receiver
// -----------------------------------
-
Label miss;
__ mov(eax, Operand(esp, kPointerSize));
@@ -214,7 +212,6 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// -- esp[0] : return address
// -- esp[4] : receiver
// -----------------------------------
-
Label miss;
__ mov(eax, Operand(esp, kPointerSize));
@@ -244,11 +241,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Get the map of the receiver.
__ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks.
+
+ // Check bit field.
__ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ test(ebx, Immediate(kSlowCaseBitFieldMask));
__ j(not_zero, &slow, not_taken);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
@@ -1040,7 +1036,6 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
-
Label miss, global_object, non_global_object;
// Get the receiver of the function from the stack; 1 ~ return address.
@@ -1179,7 +1174,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- esp[0] : return address
// -- esp[4] : receiver
// -----------------------------------
-
Label miss, probe, global;
__ mov(eax, Operand(esp, kPointerSize));
@@ -1385,19 +1379,17 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
- // Get the receiver from the stack and probe the stub cache.
- __ mov(edx, Operand(esp, 4));
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
// Cache miss: Jump to runtime.
- Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+ GenerateMiss(masm);
}
@@ -1405,12 +1397,12 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : transition map
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
__ pop(ebx);
- __ push(Operand(esp, 0)); // receiver
+ __ push(edx); // receiver
__ push(ecx); // transition map
__ push(eax); // value
__ push(ebx); // return address
@@ -1421,23 +1413,22 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
}
-void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
- // Move the return address below the arguments.
__ pop(ebx);
- __ push(Operand(esp, 0));
+ __ push(edx);
__ push(ecx);
__ push(eax);
__ push(ebx);
// Perform tail call to the entry.
- __ TailCallRuntime(f, 3, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
}
@@ -1452,7 +1443,6 @@ void KeyedStoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
// -- esp[8] : receiver
// -----------------------------------
- // Move the return address below the arguments.
__ pop(ecx);
__ push(Operand(esp, 1 * kPointerSize));
__ push(Operand(esp, 1 * kPointerSize));
@@ -1473,7 +1463,6 @@ void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
// -- esp[8] : receiver
// -----------------------------------
- // Move the return address below the arguments.
__ pop(ebx);
__ push(Operand(esp, 1 * kPointerSize));
__ push(ecx);
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index d7c7d3a23..19a380b02 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -147,6 +147,11 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
// All registers are clobbered by the operation.
void MacroAssembler::RecordWrite(Register object, int offset,
Register value, Register scratch) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are esi.
+ ASSERT(!object.is(esi) && !value.is(esi) && !scratch.is(esi));
+
// First, check if a remembered set write is even needed. The tests below
// catch stores of Smis and stores into young gen (which does not have space
// for the remembered set bits).
@@ -210,6 +215,14 @@ void MacroAssembler::RecordWrite(Register object, int offset,
}
bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ mov(object, Immediate(bit_cast<int32_t>(kZapValue)));
+ mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+ mov(scratch, Immediate(bit_cast<int32_t>(kZapValue)));
+ }
}
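
Zapping the clobbered registers under --debug-code turns silent register-reuse bugs into immediate, recognizable failures. A minimal standalone sketch of the idea (plain C++, not V8 code; the poison constant is illustrative, not V8's actual kZapValue):

    #include <cassert>
    #include <cstdint>

    static const uint32_t kPoison = 0xdeadbeef;  // illustrative zap value

    static uint32_t Barrier(uint32_t* object, uint32_t* value, bool debug) {
      uint32_t result = *object + *value;  // stand-in for the real work
      if (debug) {
        *object = kPoison;  // clobber the documented-dead registers so any
        *value = kPoison;   // caller that still reads them fails loudly
      }
      return result;
    }

    int main() {
      uint32_t object = 16, value = 26;
      assert(Barrier(&object, &value, true) == 42);
      assert(object == kPoison && value == kPoison);
      return 0;
    }
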
@@ -1098,10 +1111,14 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
return;
}
- Runtime::FunctionId function_id =
- static_cast<Runtime::FunctionId>(f->stub_id);
- RuntimeStub stub(function_id, num_arguments);
- CallStub(&stub);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ Set(eax, Immediate(num_arguments));
+ mov(ebx, Immediate(ExternalReference(f)));
+ CEntryStub ces(1);
+ CallStub(&ces);
}
@@ -1114,10 +1131,14 @@ Object* MacroAssembler::TryCallRuntime(Runtime::Function* f,
return Heap::undefined_value();
}
- Runtime::FunctionId function_id =
- static_cast<Runtime::FunctionId>(f->stub_id);
- RuntimeStub stub(function_id, num_arguments);
- return TryCallStub(&stub);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ Set(eax, Immediate(num_arguments));
+ mov(ebx, Immediate(ExternalReference(f)));
+ CEntryStub ces(1);
+ return TryCallStub(&ces);
}
@@ -1454,6 +1475,36 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
}
+void MacroAssembler::IncrementCounter(Condition cc,
+ StatsCounter* counter,
+ int value) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Label skip;
+ j(NegateCondition(cc), &skip);
+ pushfd();
+ IncrementCounter(counter, value);
+ popfd();
+ bind(&skip);
+ }
+}
+
+
+void MacroAssembler::DecrementCounter(Condition cc,
+ StatsCounter* counter,
+ int value) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Label skip;
+ j(NegateCondition(cc), &skip);
+ pushfd();
+ DecrementCounter(counter, value);
+ popfd();
+ bind(&skip);
+ }
+}
+
+
void MacroAssembler::Assert(Condition cc, const char* msg) {
if (FLAG_debug_code) Check(cc, msg);
}
@@ -1495,6 +1546,38 @@ void MacroAssembler::Abort(const char* msg) {
}
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Check that both objects are not smis.
+ ASSERT_EQ(0, kSmiTag);
+ mov(scratch1, Operand(object1));
+ and_(scratch1, Operand(object2));
+ test(scratch1, Immediate(kSmiTagMask));
+ j(zero, failure);
+
+ // Load instance type for both strings.
+ mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
+ mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
+ movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+ movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ascii strings.
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ // Interleave bits from both instance types and compare them in one check.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ and_(scratch1, kFlatAsciiStringMask);
+ and_(scratch2, kFlatAsciiStringMask);
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
+ j(not_equal, failure);
+}
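
The lea above is the heart of the check: the asserted property (mask & (mask << 3)) == 0 guarantees each masked instance type fits in three bits, so scratch1 + scratch2 * 8 interleaves the two fields without carries and one compare validates both strings. A minimal standalone sketch (plain C++, not V8 code; the mask and tag values are stand-ins):

    #include <cassert>

    int main() {
      const int kMask = 0x7;  // stand-in for kFlatAsciiStringMask
      const int kTag = 0x4;   // stand-in for the flat ASCII tag
      assert((kMask & (kMask << 3)) == 0);  // fields cannot overlap
      int type1 = 0x34, type2 = 0x0c;       // hypothetical instance types
      int packed = (type1 & kMask) + (type2 & kMask) * 8;  // the lea
      bool both = (packed == (kTag | (kTag << 3)));        // single compare
      assert(both == ((type1 & kMask) == kTag && (type2 & kMask) == kTag));
      return 0;
    }
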
+
+
CodePatcher::CodePatcher(byte* address, int size)
: address_(address), size_(size), masm_(address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index ceecebf70..cc245602d 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -331,7 +331,7 @@ class MacroAssembler: public Assembler {
// Eventually this should be used for all C calls.
void CallRuntime(Runtime::Function* f, int num_arguments);
- // Call a runtime function, returning the RuntimeStub object called.
+ // Call a runtime function, returning the CodeStub object called.
// Try to generate the stub code if necessary. Do not perform a GC
// but instead return a retry after GC failure.
Object* TryCallRuntime(Runtime::Function* f, int num_arguments);
@@ -392,6 +392,8 @@ class MacroAssembler: public Assembler {
void SetCounter(StatsCounter* counter, int value);
void IncrementCounter(StatsCounter* counter, int value);
void DecrementCounter(StatsCounter* counter, int value);
+ void IncrementCounter(Condition cc, StatsCounter* counter, int value);
+ void DecrementCounter(Condition cc, StatsCounter* counter, int value);
// ---------------------------------------------------------------------------
@@ -413,6 +415,17 @@ class MacroAssembler: public Assembler {
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
+ // ---------------------------------------------------------------------------
+ // String utilities.
+
+ // Checks if both objects are sequential ASCII strings, and jumps to label
+ // if either is not.
+ void JumpIfNotBothSequentialAsciiStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+                                           Label* on_not_flat_ascii_strings);
+
private:
List<Unresolved> unresolved_;
bool generating_stub_;
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index e41f9c3f0..f6da69379 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -59,8 +59,6 @@ namespace internal {
* call through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
- * - at_start (if 1, we are starting at the start of the
- * string, otherwise 0)
* - int* capture_array (int[num_saved_registers_], for output).
* - end of input (Address of end of string)
* - start of input (Address of first character in string)
@@ -74,6 +72,8 @@ namespace internal {
* - backup of caller ebx
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
+ * - Boolean at start (if 1, we are starting at the start of the string,
+ * otherwise 0)
* - register 0 ebp[-4] (Only positions must be stored in the first
* - register 1 ebp[-8] num_saved_registers_ registers)
* - ...
@@ -539,46 +539,33 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
return true;
}
case 'w': {
- Label done, check_digits;
- __ cmp(Operand(current_character()), Immediate('9'));
- __ j(less_equal, &check_digits);
- __ cmp(Operand(current_character()), Immediate('_'));
- __ j(equal, &done);
- // Convert to lower case if letter.
- __ mov(Operand(eax), current_character());
- __ or_(eax, 0x20);
- // check current character in range ['a'..'z'], nondestructively.
- __ sub(Operand(eax), Immediate('a'));
- __ cmp(Operand(eax), Immediate('z' - 'a'));
- BranchOrBacktrack(above, on_no_match);
- __ jmp(&done);
- __ bind(&check_digits);
- // Check current character in range ['0'..'9'].
- __ cmp(Operand(current_character()), Immediate('0'));
- BranchOrBacktrack(below, on_no_match);
- __ bind(&done);
-
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmp(Operand(current_character()), Immediate('z'));
+ BranchOrBacktrack(above, on_no_match);
+ }
+ ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
+ ExternalReference word_map = ExternalReference::re_word_character_map();
+ __ test_b(current_character(),
+ Operand::StaticArray(current_character(), times_1, word_map));
+ BranchOrBacktrack(zero, on_no_match);
return true;
}
case 'W': {
- Label done, check_digits;
- __ cmp(Operand(current_character()), Immediate('9'));
- __ j(less_equal, &check_digits);
- __ cmp(Operand(current_character()), Immediate('_'));
- BranchOrBacktrack(equal, on_no_match);
- // Convert to lower case if letter.
- __ mov(Operand(eax), current_character());
- __ or_(eax, 0x20);
- // check current character in range ['a'..'z'], nondestructively.
- __ sub(Operand(eax), Immediate('a'));
- __ cmp(Operand(eax), Immediate('z' - 'a'));
- BranchOrBacktrack(below_equal, on_no_match);
- __ jmp(&done);
- __ bind(&check_digits);
- // Check current character in range ['0'..'9'].
- __ cmp(Operand(current_character()), Immediate('0'));
- BranchOrBacktrack(above_equal, on_no_match);
- __ bind(&done);
+ Label done;
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmp(Operand(current_character()), Immediate('z'));
+ __ j(above, &done);
+ }
+ ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
+ ExternalReference word_map = ExternalReference::re_word_character_map();
+ __ test_b(current_character(),
+ Operand::StaticArray(current_character(), times_1, word_map));
+ BranchOrBacktrack(not_zero, on_no_match);
+ if (mode_ != ASCII) {
+ __ bind(&done);
+ }
return true;
}
// Non-standard classes (with no syntactic shorthand) used internally.
@@ -638,6 +625,7 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(edi);
__ push(ebx); // Callee-save on MacOS.
__ push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ push(Immediate(0)); // Make room for "at start" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -680,6 +668,15 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Store this value in a local variable, for use when clearing
// position registers.
__ mov(Operand(ebp, kInputStartMinusOne), eax);
+
+ // Determine whether the start index is zero, that is at the start of the
+ // string, and store that value in a local variable.
+ __ mov(ebx, Operand(ebp, kStartIndex));
+ __ xor_(Operand(ecx), ecx); // setcc only operates on cl (lower byte of ecx).
+ __ test(ebx, Operand(ebx));
+ __ setcc(zero, ecx); // 1 if 0 (start of string), 0 if positive.
+ __ mov(Operand(ebp, kAtStart), ecx);
+
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
// Fill in stack push order, to avoid accessing across an unwritten
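
The rewritten \w and \W classes above trade four compares and a case fold for one table lookup: a 128-entry byte map indexed by the character answers "is this a word character" in a single test_b, with characters above 'z' filtered first in non-ASCII mode so the index stays in range. A minimal standalone model of that table (plain C++, not V8 code):

    #include <cassert>

    static unsigned char word_map[128];

    static void InitWordMap() {
      for (int c = 0; c < 128; c++) {
        bool is_word = (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') ||
                       (c >= 'a' && c <= 'z') || (c == '_');
        word_map[c] = is_word ? 0xff : 0x00;  // nonzero byte <=> word char
      }
    }

    static bool IsWordChar(int c) {
      if (c > 'z') return false;  // cheap range filter, as in the stub
      return word_map[c] != 0;    // the test_b against the table
    }

    int main() {
      InitWordMap();
      assert(IsWordChar('a') && IsWordChar('0') && IsWordChar('_'));
      assert(!IsWordChar(' ') && !IsWordChar('~') && !IsWordChar(0x2603));
      return 0;
    }
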
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
index 8e7a6a5d3..d9866b72b 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -123,8 +123,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kInputStart = kStartIndex + kPointerSize;
static const int kInputEnd = kInputStart + kPointerSize;
static const int kRegisterOutput = kInputEnd + kPointerSize;
- static const int kAtStart = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kAtStart + kPointerSize;
+ static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
// Below the frame pointer - local stack variables.
// When adding local variables remember to push space for them in
@@ -133,8 +132,9 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kBackup_edi = kBackup_esi - kPointerSize;
static const int kBackup_ebx = kBackup_edi - kPointerSize;
static const int kInputStartMinusOne = kBackup_ebx - kPointerSize;
+ static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kAtStart - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
diff --git a/deps/v8/src/ia32/simulator-ia32.h b/deps/v8/src/ia32/simulator-ia32.h
index 3ebd2e61b..94ef7bff9 100644
--- a/deps/v8/src/ia32/simulator-ia32.h
+++ b/deps/v8/src/ia32/simulator-ia32.h
@@ -53,8 +53,8 @@ class SimulatorStack : public v8::internal::AllStatic {
// Call the generated regexp code directly. The entry function pointer should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- entry(p0, p1, p2, p3, p4, p5, p6, p7)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+ entry(p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 218423031..7acf81c94 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -161,6 +161,7 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(holder);
__ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+ ASSERT(!Heap::InNewSpace(interceptor));
__ mov(receiver, Immediate(Handle<Object>(interceptor)));
__ push(receiver);
__ push(FieldOperand(receiver, InterceptorInfo::kDataOffset));
@@ -347,19 +348,6 @@ static void CompileLoadInterceptor(Compiler* compiler,
}
-static void LookupPostInterceptor(JSObject* holder,
- String* name,
- LookupResult* lookup) {
- holder->LocalLookupRealNamedProperty(name, lookup);
- if (lookup->IsNotFound()) {
- Object* proto = holder->GetPrototype();
- if (proto != Heap::null_value()) {
- proto->Lookup(name, lookup);
- }
- }
-}
-
-
class LoadInterceptorCompiler BASE_EMBEDDED {
public:
explicit LoadInterceptorCompiler(Register name) : name_(name) {}
@@ -559,7 +547,6 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
- ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
__ InvokeCode(code, expected, arguments_,
@@ -997,50 +984,65 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
- // Check that the object is a two-byte string or a symbol.
- __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
- __ cmp(eax, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &miss, not_taken);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- eax);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ // Check that the object is a string or a symbol.
+ __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
+ __ cmp(eax, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &miss, not_taken);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ eax);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+ ebx, edx, name, &miss);
+ }
break;
case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &fast, taken);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
- __ j(not_equal, &miss, not_taken);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::NUMBER_FUNCTION_INDEX,
- eax);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &fast, taken);
+ __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
+ __ j(not_equal, &miss, not_taken);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::NUMBER_FUNCTION_INDEX,
+ eax);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+ ebx, edx, name, &miss);
+ }
break;
}
case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ cmp(edx, Factory::true_value());
- __ j(equal, &fast, taken);
- __ cmp(edx, Factory::false_value());
- __ j(not_equal, &miss, not_taken);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::BOOLEAN_FUNCTION_INDEX,
- eax);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a boolean.
+ __ cmp(edx, Factory::true_value());
+ __ j(equal, &fast, taken);
+ __ cmp(edx, Factory::false_value());
+ __ j(not_equal, &miss, not_taken);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::BOOLEAN_FUNCTION_INDEX,
+ eax);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+ ebx, edx, name, &miss);
+ }
break;
}
@@ -1240,21 +1242,18 @@ Object* StoreStubCompiler::CompileStoreField(JSObject* object,
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
Label miss;
- // Get the object from the stack.
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
-
// Generate store field code. Trashes the name register.
GenerateStoreField(masm(),
Builtins::StoreIC_ExtendStorage,
object,
index,
transition,
- ebx, ecx, edx,
+ edx, ecx, ebx,
&miss);
// Handle store cache miss.
@@ -1274,26 +1273,23 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
Label miss;
- // Get the object from the stack.
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
-
// Check that the object isn't a smi.
- __ test(ebx, Immediate(kSmiTagMask));
+ __ test(edx, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
// Check that the map of the object hasn't changed.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Handle<Map>(object->map())));
__ j(not_equal, &miss, not_taken);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(ebx, edx, &miss);
+ __ CheckAccessGlobalProxy(edx, ebx, &miss);
}
// Stub never generated for non-global objects that require access
@@ -1301,7 +1297,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
__ pop(ebx); // remove the return address
- __ push(Operand(esp, 0)); // receiver
+ __ push(edx); // receiver
__ push(Immediate(Handle<AccessorInfo>(callback))); // callback info
__ push(ecx); // name
__ push(eax); // value
@@ -1314,7 +1310,6 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Handle store cache miss.
__ bind(&miss);
- __ mov(ecx, Immediate(Handle<String>(name))); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ jmp(ic, RelocInfo::CODE_TARGET);
@@ -1328,26 +1323,23 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
Label miss;
- // Get the object from the stack.
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
-
// Check that the object isn't a smi.
- __ test(ebx, Immediate(kSmiTagMask));
+ __ test(edx, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
// Check that the map of the object hasn't changed.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Handle<Map>(receiver->map())));
__ j(not_equal, &miss, not_taken);
// Perform global security token check if needed.
if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(ebx, edx, &miss);
+ __ CheckAccessGlobalProxy(edx, ebx, &miss);
}
// Stub never generated for non-global objects that require access
@@ -1355,7 +1347,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
__ pop(ebx); // remove the return address
- __ push(Operand(esp, 0)); // receiver
+ __ push(edx); // receiver
__ push(ecx); // name
__ push(eax); // value
__ push(ebx); // restore return address
@@ -1367,7 +1359,6 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Handle store cache miss.
__ bind(&miss);
- __ mov(ecx, Immediate(Handle<String>(name))); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ jmp(ic, RelocInfo::CODE_TARGET);
@@ -1382,14 +1373,13 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
Label miss;
// Check that the map of the global has not changed.
- __ mov(ebx, Operand(esp, kPointerSize));
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Handle<Map>(object->map())));
__ j(not_equal, &miss, not_taken);
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc
index 104d18750..9267507c7 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.cc
+++ b/deps/v8/src/ia32/virtual-frame-ia32.cc
@@ -899,31 +899,45 @@ Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
Result VirtualFrame::CallStoreIC() {
// Name, value, and receiver are on top of the frame. The IC
- // expects name in ecx, value in eax, and receiver on the stack. It
- // does not drop the receiver.
+ // expects name in ecx, value in eax, and receiver in edx.
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Result name = Pop();
Result value = Pop();
- PrepareForCall(1, 0); // One stack arg, not callee-dropped.
+ Result receiver = Pop();
+ PrepareForCall(0, 0);
- if (value.is_register() && value.reg().is(ecx)) {
- if (name.is_register() && name.reg().is(eax)) {
+  // Optimized for the case in which the name is a constant value.
+ if (name.is_register() && (name.reg().is(edx) || name.reg().is(eax))) {
+ if (!is_used(ecx)) {
+ name.ToRegister(ecx);
+ } else if (!is_used(ebx)) {
+ name.ToRegister(ebx);
+ } else {
+ ASSERT(!is_used(edi)); // Only three results are live, so edi is free.
+ name.ToRegister(edi);
+ }
+ }
+  // Name is now out of edx and eax, so those registers can be set up for
+  // value and receiver before name is moved into ecx.
+ if (value.is_register() && value.reg().is(edx)) {
+ if (receiver.is_register() && receiver.reg().is(eax)) {
// Wrong registers.
- __ xchg(eax, ecx);
+ __ xchg(eax, edx);
} else {
- // Register eax is free for value, which frees ecx for name.
+ // Register eax is free for value, which frees edx for receiver.
value.ToRegister(eax);
- name.ToRegister(ecx);
+ receiver.ToRegister(edx);
}
} else {
- // Register ecx is free for name, which guarantees eax is free for
+ // Register edx is free for receiver, which guarantees eax is free for
// value.
- name.ToRegister(ecx);
+ receiver.ToRegister(edx);
value.ToRegister(eax);
}
-
+ // Receiver and value are in the right place, so ecx is free for name.
+ name.ToRegister(ecx);
name.Unuse();
value.Unuse();
+ receiver.Unuse();
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 4edf6f18b..8fc9ddb86 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -378,6 +378,18 @@ Object* CallIC::TryCallAsFunction(Object* object) {
return *delegate;
}
+void CallIC::ReceiverToObject(Handle<Object> object) {
+ HandleScope scope;
+ Handle<Object> receiver(object);
+
+ // Change the receiver to the result of calling ToObject on it.
+ const int argc = this->target()->arguments_count();
+ StackFrameLocator locator;
+ JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
+ int index = frame->ComputeExpressionsCount() - (argc + 1);
+ frame->SetExpression(index, *Factory::ToObject(object));
+}
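
ReceiverToObject patches the caller's frame in place: with argc arguments pushed after the receiver, the receiver lives argc + 1 expression slots below the top, and that slot is overwritten with the boxed (ToObject) result so primitive receivers such as strings and numbers reach the callee as wrapper objects. A minimal standalone sketch of the slot arithmetic (plain C++, not V8 code):

    #include <cassert>
    #include <cstring>
    #include <vector>

    int main() {
      std::vector<const char*> exprs;  // expression stack, bottom first
      exprs.push_back("receiver");     // pushed before the arguments
      exprs.push_back("arg0");
      exprs.push_back("arg1");         // argc == 2
      int argc = 2;
      int index = static_cast<int>(exprs.size()) - (argc + 1);
      assert(std::strcmp(exprs[index], "receiver") == 0);  // slot to patch
      return 0;
    }
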
+
Object* CallIC::LoadFunction(State state,
Handle<Object> object,
@@ -388,6 +400,10 @@ Object* CallIC::LoadFunction(State state,
return TypeError("non_object_property_call", object, name);
}
+ if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
+ ReceiverToObject(object);
+ }
+
// Check if the name is trivially convertible to an index and get
// the element if so.
uint32_t index;
@@ -1286,9 +1302,9 @@ Object* CallIC_Miss(Arguments args) {
Handle<JSFunction> function = Handle<JSFunction>(JSFunction::cast(result));
InLoopFlag in_loop = ic.target()->ic_in_loop();
if (in_loop == IN_LOOP) {
- CompileLazyInLoop(function, CLEAR_EXCEPTION);
+ CompileLazyInLoop(function, args.at<Object>(0), CLEAR_EXCEPTION);
} else {
- CompileLazy(function, CLEAR_EXCEPTION);
+ CompileLazy(function, args.at<Object>(0), CLEAR_EXCEPTION);
}
return *function;
}
@@ -1379,16 +1395,6 @@ Object* SharedStoreIC_ExtendStorage(Arguments args) {
}
-void StoreIC::GenerateInitialize(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
-}
-
-
// Used from ic_<arch>.cc.
Object* KeyedStoreIC_Miss(Arguments args) {
NoHandleAllocation na;
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index 1dd7edf61..a991e30af 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -209,6 +209,8 @@ class CallIC: public IC {
// Otherwise, it returns the undefined value.
Object* TryCallAsFunction(Object* object);
+ void ReceiverToObject(Handle<Object> object);
+
static void Clear(Address address, Code* target);
friend class IC;
};
@@ -293,6 +295,13 @@ class KeyedLoadIC: public IC {
static void ClearInlinedVersion(Address address);
private:
+  // Bit mask to be tested against the bit field for the cases in which
+  // the generic stub should go into the slow case.
+  // The access check must be performed explicitly, since the generic
+  // stub does not perform map checks.
+ static const int kSlowCaseBitFieldMask =
+ (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
+
static void Generate(MacroAssembler* masm, const ExternalReference& f);
// Update the inline cache.
@@ -339,14 +348,12 @@ class StoreIC: public IC {
Handle<Object> value);
// Code generators for stub routines. Only called once at startup.
- static void GenerateInitialize(MacroAssembler* masm);
+ static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateExtendStorage(MacroAssembler* masm);
private:
- static void Generate(MacroAssembler* masm, const ExternalReference& f);
-
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
diff --git a/deps/v8/src/json-delay.js b/deps/v8/src/json-delay.js
index 1a6f0085c..7788f516b 100644
--- a/deps/v8/src/json-delay.js
+++ b/deps/v8/src/json-delay.js
@@ -29,7 +29,7 @@ var $JSON = global.JSON;
function ParseJSONUnfiltered(text) {
var s = $String(text);
- var f = %CompileString("(" + text + ")", true);
+ var f = %CompileString(text, true);
return f();
}
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index 8af472d39..505cf03e5 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -4462,10 +4462,13 @@ void CharacterRange::Merge(ZoneList<CharacterRange>* first_set,
while (i1 < n1 || i2 < n2) {
CharacterRange next_range;
int range_source;
- if (i2 == n2 || first_set->at(i1).from() < second_set->at(i2).from()) {
+ if (i2 == n2 ||
+ (i1 < n1 && first_set->at(i1).from() < second_set->at(i2).from())) {
+ // Next smallest element is in first set.
next_range = first_set->at(i1++);
range_source = kInsideFirst;
} else {
+ // Next smallest element is in second set.
next_range = second_set->at(i2++);
range_source = kInsideSecond;
}
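
The added i1 < n1 term fixes a real out-of-bounds read: once the first set is exhausted but the second still has ranges, i2 == n2 is false and the old condition went on to evaluate first_set->at(i1) past the end. A minimal standalone version of the corrected merge loop (plain C++, not V8 code):

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> first = {1, 4};
      std::vector<int> second = {2, 3, 5, 6};  // outlives the first list
      std::vector<int> merged;
      size_t i1 = 0, i2 = 0, n1 = first.size(), n2 = second.size();
      while (i1 < n1 || i2 < n2) {
        if (i2 == n2 || (i1 < n1 && first[i1] < second[i2])) {
          merged.push_back(first[i1++]);   // next smallest is in first
        } else {
          merged.push_back(second[i2++]);  // next smallest is in second
        }
      }
      assert((merged == std::vector<int>{1, 2, 3, 4, 5, 6}));
      return 0;
    }
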
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index aff63c382..d3c2767a5 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -68,7 +68,8 @@ class List {
// not safe to use after operations that can change the list's
// backing store (eg, Add).
inline T& operator[](int i) const {
- ASSERT(0 <= i && i < length_);
+ ASSERT(0 <= i);
+ ASSERT(i < length_);
return data_[i];
}
inline T& at(int i) const { return operator[](i); }
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index bbce926c8..5de7429e5 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -155,6 +155,13 @@ void StackTracer::Trace(TickSample* sample) {
return;
}
+ const Address functionAddr =
+ sample->fp + JavaScriptFrameConstants::kFunctionOffset;
+ if (SafeStackFrameIterator::IsWithinBounds(sample->sp, js_entry_sp,
+ functionAddr)) {
+ sample->function = Memory::Address_at(functionAddr) - kHeapObjectTag;
+ }
+
int i = 0;
const Address callback = Logger::current_state_ != NULL ?
Logger::current_state_->external_callback() : NULL;
@@ -162,11 +169,8 @@ void StackTracer::Trace(TickSample* sample) {
sample->stack[i++] = callback;
}
- SafeStackTraceFrameIterator it(
- reinterpret_cast<Address>(sample->fp),
- reinterpret_cast<Address>(sample->sp),
- reinterpret_cast<Address>(sample->sp),
- js_entry_sp);
+ SafeStackTraceFrameIterator it(sample->fp, sample->sp,
+ sample->sp, js_entry_sp);
while (!it.done() && i < TickSample::kMaxFramesCount) {
sample->stack[i++] = it.frame()->pc();
it.Advance();
@@ -837,10 +841,77 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
void Logger::CodeMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
+ MoveEventInternal(CODE_MOVE_EVENT, from, to);
+#endif
+}
+
+
+void Logger::CodeDeleteEvent(Address from) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ DeleteEventInternal(CODE_DELETE_EVENT, from);
+#endif
+}
+
+
+void Logger::SnapshotPositionEvent(Address addr, int pos) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (!Log::IsEnabled() || !FLAG_log_snapshot_positions) return;
+ LogMessageBuilder msg;
+ msg.Append("%s,", log_events_[SNAPSHOT_POSITION_EVENT]);
+ msg.AppendAddress(addr);
+ msg.Append(",%d", pos);
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
+ msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::FunctionCreateEvent(JSFunction* function) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ static Address prev_code = NULL;
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
+ LogMessageBuilder msg;
+ msg.Append("%s,", log_events_[FUNCTION_CREATION_EVENT]);
+ msg.AppendAddress(function->address());
+ msg.Append(',');
+ msg.AppendAddress(function->code()->address(), prev_code);
+ prev_code = function->code()->address();
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
+ msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::FunctionMoveEvent(Address from, Address to) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ MoveEventInternal(FUNCTION_MOVE_EVENT, from, to);
+#endif
+}
+
+
+void Logger::FunctionDeleteEvent(Address from) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ DeleteEventInternal(FUNCTION_DELETE_EVENT, from);
+#endif
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+void Logger::MoveEventInternal(LogEventsAndTags event,
+ Address from,
+ Address to) {
static Address prev_to_ = NULL;
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("%s,", log_events_[CODE_MOVE_EVENT]);
+ msg.Append("%s,", log_events_[event]);
msg.AppendAddress(from);
msg.Append(',');
msg.AppendAddress(to, prev_to_);
@@ -851,15 +922,15 @@ void Logger::CodeMoveEvent(Address from, Address to) {
}
msg.Append('\n');
msg.WriteToLogFile();
-#endif
}
+#endif
-void Logger::CodeDeleteEvent(Address from) {
#ifdef ENABLE_LOGGING_AND_PROFILING
+void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("%s,", log_events_[CODE_DELETE_EVENT]);
+ msg.Append("%s,", log_events_[event]);
msg.AppendAddress(from);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
@@ -867,8 +938,8 @@ void Logger::CodeDeleteEvent(Address from) {
}
msg.Append('\n');
msg.WriteToLogFile();
-#endif
}
+#endif
void Logger::ResourceEvent(const char* name, const char* tag) {
@@ -1052,13 +1123,17 @@ void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
void Logger::TickEvent(TickSample* sample, bool overflow) {
if (!Log::IsEnabled() || !FLAG_prof) return;
static Address prev_sp = NULL;
+ static Address prev_function = NULL;
LogMessageBuilder msg;
msg.Append("%s,", log_events_[TICK_EVENT]);
- Address prev_addr = reinterpret_cast<Address>(sample->pc);
+ Address prev_addr = sample->pc;
msg.AppendAddress(prev_addr);
msg.Append(',');
- msg.AppendAddress(reinterpret_cast<Address>(sample->sp), prev_sp);
- prev_sp = reinterpret_cast<Address>(sample->sp);
+ msg.AppendAddress(sample->sp, prev_sp);
+ prev_sp = sample->sp;
+ msg.Append(',');
+ msg.AppendAddress(sample->function, prev_function);
+ prev_function = sample->function;
msg.Append(",%d", static_cast<int>(sample->state));
if (overflow) {
msg.Append(",overflow");
@@ -1127,6 +1202,7 @@ void Logger::ResumeProfiler(int flags) {
LOG(UncheckedStringEvent("profiler", "resume"));
FLAG_log_code = true;
LogCompiledFunctions();
+ LogFunctionObjects();
LogAccessorCallbacks();
if (!FLAG_sliding_state_window) ticker_->Start();
}
@@ -1161,9 +1237,7 @@ static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis) {
AssertNoAllocation no_alloc;
int compiled_funcs_count = 0;
HeapIterator iterator;
- while (iterator.has_next()) {
- HeapObject* obj = iterator.next();
- ASSERT(obj != NULL);
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (!obj->IsSharedFunctionInfo()) continue;
SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
if (sfi->is_compiled()
@@ -1273,12 +1347,22 @@ void Logger::LogCompiledFunctions() {
}
+void Logger::LogFunctionObjects() {
+ AssertNoAllocation no_alloc;
+ HeapIterator iterator;
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ if (!obj->IsJSFunction()) continue;
+ JSFunction* jsf = JSFunction::cast(obj);
+ if (!jsf->is_compiled()) continue;
+ LOG(FunctionCreateEvent(jsf));
+ }
+}
+
+
void Logger::LogAccessorCallbacks() {
AssertNoAllocation no_alloc;
HeapIterator iterator;
- while (iterator.has_next()) {
- HeapObject* obj = iterator.next();
- ASSERT(obj != NULL);
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (!obj->IsAccessorInfo()) continue;
AccessorInfo* ai = AccessorInfo::cast(obj);
if (!ai->name()->IsString()) continue;
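
The heap-walking loops in this file all move to the same shape because the iterator's contract changed: next() now returns NULL once the heap is exhausted, replacing the separate has_next() query. A minimal standalone model of the idiom (plain C++, not V8 code):

    #include <cassert>
    #include <cstddef>

    struct Obj { int size; };

    class Iter {
     public:
      Iter(Obj* objs, int n) : objs_(objs), i_(0), n_(n) {}
      Obj* next() { return i_ < n_ ? &objs_[i_++] : NULL; }  // NULL at end
     private:
      Obj* objs_;
      int i_, n_;
    };

    int main() {
      Obj heap[3] = {{8}, {16}, {24}};
      Iter iterator(heap, 3);
      int total = 0;
      for (Obj* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
        total += obj->size;
      }
      assert(total == 48);
      return 0;
    }
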
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 4d5acced6..1f6e60e1a 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -116,6 +116,10 @@ class VMState BASE_EMBEDDED {
V(CODE_CREATION_EVENT, "code-creation", "cc") \
V(CODE_MOVE_EVENT, "code-move", "cm") \
V(CODE_DELETE_EVENT, "code-delete", "cd") \
+ V(FUNCTION_CREATION_EVENT, "function-creation", "fc") \
+ V(FUNCTION_MOVE_EVENT, "function-move", "fm") \
+ V(FUNCTION_DELETE_EVENT, "function-delete", "fd") \
+ V(SNAPSHOT_POSITION_EVENT, "snapshot-pos", "sp") \
V(TICK_EVENT, "tick", "t") \
V(REPEAT_META_EVENT, "repeat", "r") \
V(BUILTIN_TAG, "Builtin", "bi") \
@@ -223,6 +227,14 @@ class Logger {
static void CodeMoveEvent(Address from, Address to);
// Emits a code delete event.
static void CodeDeleteEvent(Address from);
+ // Emits a function object create event.
+ static void FunctionCreateEvent(JSFunction* function);
+ // Emits a function move event.
+ static void FunctionMoveEvent(Address from, Address to);
+ // Emits a function delete event.
+ static void FunctionDeleteEvent(Address from);
+
+ static void SnapshotPositionEvent(Address addr, int pos);
// ==== Events logged by --log-gc. ====
// Heap sampling events: start, end, and individual types.
@@ -275,6 +287,8 @@ class Logger {
// Logs all compiled functions found in the heap.
static void LogCompiledFunctions();
+ // Logs all compiled JSFunction objects found in the heap.
+ static void LogFunctionObjects();
// Logs all accessor callbacks found in the heap.
static void LogAccessorCallbacks();
// Used for logging stubs found in the snapshot.
@@ -296,6 +310,15 @@ class Logger {
const char* name,
Address entry_point);
+ // Internal configurable move event.
+ static void MoveEventInternal(LogEventsAndTags event,
+ Address from,
+ Address to);
+
+  // Internal configurable delete event.
+ static void DeleteEventInternal(LogEventsAndTags event,
+ Address from);
+
// Emits aliases for compressed messages.
static void LogAliases();
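The definitions are not part of this hunk, but the declarations suggest the new function events delegate to these shared helpers; a sketch of the likely shape in log.cc:

    void Logger::FunctionMoveEvent(Address from, Address to) {
      MoveEventInternal(FUNCTION_MOVE_EVENT, from, to);
    }

    void Logger::FunctionDeleteEvent(Address from) {
      DeleteEventInternal(FUNCTION_DELETE_EVENT, from);
    }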
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 1e436a0a1..c160b4916 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -92,6 +92,7 @@ macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
+macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
macro FLOOR(arg) = $floor(arg);
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index e284b4264..1f2c37d30 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -129,7 +129,8 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
#endif
PagedSpaces spaces;
- while (PagedSpace* space = spaces.next()) {
+ for (PagedSpace* space = spaces.next();
+ space != NULL; space = spaces.next()) {
space->PrepareForMarkCompact(compacting_collection_);
}
@@ -172,7 +173,7 @@ void MarkCompactCollector::Finish() {
int old_gen_used = 0;
OldSpaces spaces;
- while (OldSpace* space = spaces.next()) {
+ for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
old_gen_recoverable += space->Waste() + space->AvailableFree();
old_gen_used += space->Size();
}
@@ -475,8 +476,8 @@ void MarkCompactCollector::MarkDescriptorArray(
void MarkCompactCollector::CreateBackPointers() {
HeapObjectIterator iterator(Heap::map_space());
- while (iterator.has_next()) {
- Object* next_object = iterator.next();
+ for (HeapObject* next_object = iterator.next();
+ next_object != NULL; next_object = iterator.next()) {
if (next_object->IsMap()) { // Could also be ByteArray on free list.
Map* map = Map::cast(next_object);
if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
@@ -509,8 +510,7 @@ static void ScanOverflowedObjects(T* it) {
// so that we don't waste effort pointlessly scanning for objects.
ASSERT(!marking_stack.is_full());
- while (it->has_next()) {
- HeapObject* object = it->next();
+ for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
if (object->IsOverflowed()) {
object->ClearOverflow();
ASSERT(object->IsMarked());
@@ -793,8 +793,9 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// scan the descriptor arrays of those maps, not all maps.
// All of these actions are carried out only on maps of JSObjects
// and related subtypes.
- while (map_iterator.has_next()) {
- Map* map = reinterpret_cast<Map*>(map_iterator.next());
+ for (HeapObject* obj = map_iterator.next();
+ obj != NULL; obj = map_iterator.next()) {
+ Map* map = reinterpret_cast<Map*>(obj);
if (!map->IsMarked() && map->IsByteArray()) continue;
ASSERT(SafeIsMap(map));
@@ -969,12 +970,6 @@ inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
inline void IgnoreNonLiveObject(HeapObject* object) {}
-// A code deletion event is logged for non-live code objects.
-inline void LogNonLiveCodeObject(HeapObject* object) {
- if (object->IsCode()) LOG(CodeDeleteEvent(object->address()));
-}
-
-
// Function template that, given a range of addresses (eg, a semispace or a
// paged space page), iterates through the objects in the range to clear
// mark bits and compute and encode forwarding addresses. As a side effect,
@@ -1122,10 +1117,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
is_previous_alive = true;
}
} else {
- if (object->IsCode()) {
- // Notify the logger that compiled code has been collected.
- LOG(CodeDeleteEvent(Code::cast(object)->address()));
- }
+ MarkCompactCollector::ReportDeleteIfNeeded(object);
if (is_previous_alive) { // Transition from live to free.
free_start = current;
is_previous_alive = false;
@@ -1204,7 +1196,7 @@ void MarkCompactCollector::EncodeForwardingAddresses() {
// Compute the forwarding pointers in each space.
EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
- IgnoreNonLiveObject>(
+ ReportDeleteIfNeeded>(
Heap::old_pointer_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
@@ -1212,7 +1204,7 @@ void MarkCompactCollector::EncodeForwardingAddresses() {
Heap::old_data_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
- LogNonLiveCodeObject>(
+ ReportDeleteIfNeeded>(
Heap::code_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
@@ -1291,6 +1283,7 @@ class MapCompact {
MapIterator it;
HeapObject* o = it.next();
for (; o != first_map_to_evacuate_; o = it.next()) {
+ ASSERT(o != NULL);
Map* map = reinterpret_cast<Map*>(o);
ASSERT(!map->IsMarked());
ASSERT(!map->IsOverflowed());
@@ -1316,10 +1309,8 @@ class MapCompact {
void UpdateMapPointersInLargeObjectSpace() {
LargeObjectIterator it(Heap::lo_space());
- while (true) {
- if (!it.has_next()) break;
- UpdateMapPointersInObject(it.next());
- }
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+ UpdateMapPointersInObject(obj);
}
void Finish() {
@@ -1362,8 +1353,8 @@ class MapCompact {
static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
while (true) {
- ASSERT(it->has_next());
HeapObject* next = it->next();
+ ASSERT(next != NULL);
if (next == last)
return NULL;
ASSERT(!next->IsOverflowed());
@@ -1452,8 +1443,9 @@ class MapCompact {
if (!FLAG_enable_slow_asserts)
return;
- while (map_to_evacuate_it_.has_next())
- ASSERT(FreeListNode::IsFreeListNode(map_to_evacuate_it_.next()));
+ for (HeapObject* obj = map_to_evacuate_it_.next();
+ obj != NULL; obj = map_to_evacuate_it_.next())
+ ASSERT(FreeListNode::IsFreeListNode(obj));
}
#endif
};
@@ -1486,7 +1478,8 @@ void MarkCompactCollector::SweepSpaces() {
map_compact.FinishMapSpace();
PagedSpaces spaces;
- while (PagedSpace* space = spaces.next()) {
+ for (PagedSpace* space = spaces.next();
+ space != NULL; space = spaces.next()) {
if (space == Heap::map_space()) continue;
map_compact.UpdateMapPointersInPagedSpace(space);
}
@@ -1661,7 +1654,8 @@ void MarkCompactCollector::UpdatePointers() {
// Large objects do not move, the map word can be updated directly.
LargeObjectIterator it(Heap::lo_space());
- while (it.has_next()) UpdatePointersInNewObject(it.next());
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+ UpdatePointersInNewObject(obj);
USE(live_maps);
USE(live_pointer_olds);
@@ -1825,7 +1819,8 @@ void MarkCompactCollector::RelocateObjects() {
Page::set_rset_state(Page::IN_USE);
#endif
PagedSpaces spaces;
- while (PagedSpace* space = spaces.next()) space->MCCommitRelocationInfo();
+ for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
+ space->MCCommitRelocationInfo();
}
@@ -1906,6 +1901,11 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
+ HeapObject* copied_to = HeapObject::FromAddress(new_addr);
+ if (copied_to->IsJSFunction()) {
+ LOG(FunctionMoveEvent(old_addr, new_addr));
+ }
+
return obj_size;
}
@@ -1986,6 +1986,11 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
}
#endif
+ HeapObject* copied_to = HeapObject::FromAddress(new_addr);
+ if (copied_to->IsJSFunction()) {
+ LOG(FunctionMoveEvent(old_addr, new_addr));
+ }
+
return obj_size;
}
@@ -2001,4 +2006,15 @@ void MarkCompactCollector::RebuildRSets() {
Heap::RebuildRSets();
}
+
+void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (obj->IsCode()) {
+ LOG(CodeDeleteEvent(obj->address()));
+ } else if (obj->IsJSFunction()) {
+ LOG(FunctionDeleteEvent(obj->address()));
+ }
+#endif
+}
+
} } // namespace v8::internal
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index 02aedb3ac..ab572f699 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -115,6 +115,9 @@ class MarkCompactCollector: public AllStatic {
static bool in_use() { return state_ > PREPARE_GC; }
#endif
+ // Determine type of object and emit deletion log event.
+ static void ReportDeleteIfNeeded(HeapObject* obj);
+
private:
#ifdef DEBUG
enum CollectorState {
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index d3c8fcca4..df008c91b 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -178,8 +178,7 @@ function FormatMessage(message) {
result_not_primitive: "Result of %0 must be a primitive, was %1",
invalid_json: "String '%0' is not valid JSON",
circular_structure: "Converting circular structure to JSON",
- object_keys_non_object: "Object.keys called on non-object",
- object_get_prototype_non_object: "Object.getPrototypeOf called on non-object",
+ obj_ctor_property_non_object: "Object.%0 called on non-object",
array_indexof_not_defined: "Array.getIndexOf: Argument undefined"
};
}
diff --git a/deps/v8/src/mirror-delay.js b/deps/v8/src/mirror-delay.js
index ba663b2a2..1487ce57c 100644
--- a/deps/v8/src/mirror-delay.js
+++ b/deps/v8/src/mirror-delay.js
@@ -600,14 +600,14 @@ ObjectMirror.prototype.protoObject = function() {
ObjectMirror.prototype.hasNamedInterceptor = function() {
// Get information on interceptors for this object.
- var x = %DebugInterceptorInfo(this.value_);
+ var x = %GetInterceptorInfo(this.value_);
return (x & 2) != 0;
};
ObjectMirror.prototype.hasIndexedInterceptor = function() {
// Get information on interceptors for this object.
- var x = %DebugInterceptorInfo(this.value_);
+ var x = %GetInterceptorInfo(this.value_);
return (x & 1) != 0;
};
@@ -631,13 +631,13 @@ ObjectMirror.prototype.propertyNames = function(kind, limit) {
// Find all the named properties.
if (kind & PropertyKind.Named) {
// Get the local property names.
- propertyNames = %DebugLocalPropertyNames(this.value_);
+ propertyNames = %GetLocalPropertyNames(this.value_);
total += propertyNames.length;
// Get names for named interceptor properties if any.
if (this.hasNamedInterceptor() && (kind & PropertyKind.Named)) {
var namedInterceptorNames =
- %DebugNamedInterceptorPropertyNames(this.value_);
+ %GetNamedInterceptorPropertyNames(this.value_);
if (namedInterceptorNames) {
propertyNames = propertyNames.concat(namedInterceptorNames);
total += namedInterceptorNames.length;
@@ -648,13 +648,13 @@ ObjectMirror.prototype.propertyNames = function(kind, limit) {
// Find all the indexed properties.
if (kind & PropertyKind.Indexed) {
// Get the local element names.
- elementNames = %DebugLocalElementNames(this.value_);
+ elementNames = %GetLocalElementNames(this.value_);
total += elementNames.length;
// Get names for indexed interceptor properties.
if (this.hasIndexedInterceptor() && (kind & PropertyKind.Indexed)) {
var indexedInterceptorNames =
- %DebugIndexedInterceptorElementNames(this.value_);
+ %GetIndexedInterceptorElementNames(this.value_);
if (indexedInterceptorNames) {
elementNames = elementNames.concat(indexedInterceptorNames);
total += indexedInterceptorNames.length;
@@ -2089,8 +2089,10 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
content.evalFromScript =
this.serializeReference(mirror.evalFromScript());
var evalFromLocation = mirror.evalFromLocation()
- content.evalFromLocation = { line: evalFromLocation.line,
- column: evalFromLocation.column}
+ if (evalFromLocation) {
+ content.evalFromLocation = { line: evalFromLocation.line,
+ column: evalFromLocation.column };
+ }
if (mirror.evalFromFunctionName()) {
content.evalFromFunctionName = mirror.evalFromFunctionName();
}
diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc
index b889e2e20..6457ae742 100644
--- a/deps/v8/src/mksnapshot.cc
+++ b/deps/v8/src/mksnapshot.cc
@@ -130,6 +130,10 @@ class CppByteSink : public i::SnapshotByteSink {
}
}
+ virtual int Position() {
+ return bytes_written_;
+ }
+
private:
FILE* fp_;
int bytes_written_;
@@ -160,10 +164,10 @@ int main(int argc, char** argv) {
}
context.Dispose();
CppByteSink sink(argv[1]);
- i::Serializer ser(&sink);
// This results in a somewhat smaller snapshot, probably because it gets rid
// of some things that are cached between garbage collections.
i::Heap::CollectAllGarbage(true);
+ i::StartupSerializer ser(&sink);
ser.Serialize();
return 0;
}
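The new Position() override pairs with the snapshot-pos ("sp") event declared in log.h: it gives the serializer a byte offset to report for each object it writes out. A hedged sketch of the presumed call site (object_address and sink_ are illustrative names; the serializer side is not shown in this patch):

    LOG(SnapshotPositionEvent(object_address, sink_->Position()));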
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 300334246..4355fe9e1 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -1349,7 +1349,7 @@ void FixedArray::set(int index, Object* value) {
}
-WriteBarrierMode HeapObject::GetWriteBarrierMode() {
+WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
if (Heap::InNewSpace(this)) return SKIP_WRITE_BARRIER;
return UPDATE_WRITE_BARRIER;
}
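The otherwise-unused reference parameter is a compile-time proof of safety: a cached mode is only valid while no allocation (and therefore no GC) can happen, so callers must hold an AssertNoAllocation scope. Every call site in this patch now follows the same shape (dest, src, and len are placeholders):

    AssertNoAllocation no_gc;
    WriteBarrierMode mode = dest->GetWriteBarrierMode(no_gc);
    for (int i = 0; i < len; i++) {
      dest->set(i, src->get(i), mode);  // mode stays valid: nothing allocates here
    }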
@@ -1367,6 +1367,7 @@ void FixedArray::set(int index,
void FixedArray::fast_set(FixedArray* array, int index, Object* value) {
ASSERT(index >= 0 && index < array->length());
+ ASSERT(!Heap::InNewSpace(value));
WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
}
@@ -1547,9 +1548,7 @@ uint32_t NumberDictionary::max_number_key() {
}
void NumberDictionary::set_requires_slow_elements() {
- set(kMaxNumberKeyIndex,
- Smi::FromInt(kRequiresSlowElementsMask),
- SKIP_WRITE_BARRIER);
+ set(kMaxNumberKeyIndex, Smi::FromInt(kRequiresSlowElementsMask));
}
@@ -2372,8 +2371,8 @@ BOOL_GETTER(SharedFunctionInfo, compiler_hints,
kHasOnlySimpleThisPropertyAssignments)
BOOL_ACCESSORS(SharedFunctionInfo,
compiler_hints,
- try_fast_codegen,
- kTryFastCodegen)
+ try_full_codegen,
+ kTryFullCodegen)
INT_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
INT_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
@@ -2972,7 +2971,8 @@ void Dictionary<Shape, Key>::SetEntry(int entry,
PropertyDetails details) {
ASSERT(!key->IsString() || details.IsDeleted() || details.index() > 0);
int index = HashTable<Shape, Key>::EntryToIndex(entry);
- WriteBarrierMode mode = FixedArray::GetWriteBarrierMode();
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(no_gc);
FixedArray::set(index, key, mode);
FixedArray::set(index+1, value, mode);
FixedArray::fast_set(this, index+2, details.AsSmi());
@@ -3006,8 +3006,13 @@ void JSArray::EnsureSize(int required_size) {
}
+void JSArray::set_length(Smi* length) {
+ set_length(static_cast<Object*>(length), SKIP_WRITE_BARRIER);
+}
+
+
void JSArray::SetContent(FixedArray* storage) {
- set_length(Smi::FromInt(storage->length()), SKIP_WRITE_BARRIER);
+ set_length(Smi::FromInt(storage->length()));
set_elements(storage);
}
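Skipping the barrier is safe because a smi is a tagged immediate, not a heap pointer: storing one can never create an old-to-new-space reference for the GC to track. Call sites simply drop the explicit mode, as the rest of this diff shows:

    array->set_length(Smi::FromInt(0));  // was set_length(..., SKIP_WRITE_BARRIER)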
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 118c4891d..6dd1d4924 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -2839,7 +2839,11 @@ Object* JSObject::DefineGetterSetter(String* name,
if (result.IsReadOnly()) return Heap::undefined_value();
if (result.type() == CALLBACKS) {
Object* obj = result.GetCallbackObject();
- if (obj->IsFixedArray()) return obj;
+ if (obj->IsFixedArray()) {
+ PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
+ SetNormalizedProperty(name, obj, details);
+ return obj;
+ }
}
}
}
@@ -3196,8 +3200,9 @@ Object* FixedArray::UnionOfKeys(FixedArray* other) {
Object* obj = Heap::AllocateFixedArray(len0 + extra);
if (obj->IsFailure()) return obj;
// Fill in the content
+ AssertNoAllocation no_gc;
FixedArray* result = FixedArray::cast(obj);
- WriteBarrierMode mode = result->GetWriteBarrierMode();
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len0; i++) {
result->set(i, get(i), mode);
}
@@ -3221,10 +3226,11 @@ Object* FixedArray::CopySize(int new_length) {
if (obj->IsFailure()) return obj;
FixedArray* result = FixedArray::cast(obj);
// Copy the content
+ AssertNoAllocation no_gc;
int len = length();
if (new_length < len) len = new_length;
result->set_map(map());
- WriteBarrierMode mode = result->GetWriteBarrierMode();
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len; i++) {
result->set(i, get(i), mode);
}
@@ -3233,7 +3239,8 @@ Object* FixedArray::CopySize(int new_length) {
void FixedArray::CopyTo(int pos, FixedArray* dest, int dest_pos, int len) {
- WriteBarrierMode mode = dest->GetWriteBarrierMode();
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = dest->GetWriteBarrierMode(no_gc);
for (int index = 0; index < len; index++) {
dest->set(dest_pos+index, get(pos+index), mode);
}
@@ -3267,8 +3274,7 @@ Object* DescriptorArray::Allocate(int number_of_descriptors) {
if (array->IsFailure()) return array;
result->set(kContentArrayIndex, array);
result->set(kEnumerationIndexIndex,
- Smi::FromInt(PropertyDetails::kInitialIndex),
- SKIP_WRITE_BARRIER);
+ Smi::FromInt(PropertyDetails::kInitialIndex));
return result;
}
@@ -4696,8 +4702,8 @@ void Map::ClearNonLiveTransitions(Object* real_prototype) {
ASSERT(target->IsHeapObject());
if (!target->IsMarked()) {
ASSERT(target->IsMap());
- contents->set(i + 1, NullDescriptorDetails, SKIP_WRITE_BARRIER);
- contents->set(i, Heap::null_value(), SKIP_WRITE_BARRIER);
+ contents->set(i + 1, NullDescriptorDetails);
+ contents->set_null(i);
ASSERT(target->prototype() == this ||
target->prototype() == real_prototype);
// Getter prototype() is read-only, set_prototype() has side effects.
@@ -5157,7 +5163,8 @@ void JSObject::SetFastElements(FixedArray* elems) {
uint32_t len = static_cast<uint32_t>(elems->length());
for (uint32_t i = 0; i < len; i++) ASSERT(elems->get(i)->IsTheHole());
#endif
- WriteBarrierMode mode = elems->GetWriteBarrierMode();
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elems->GetWriteBarrierMode(no_gc);
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
FixedArray* old_elements = FixedArray::cast(elements());
@@ -5224,7 +5231,7 @@ Object* JSObject::SetSlowElements(Object* len) {
Object* JSArray::Initialize(int capacity) {
ASSERT(capacity >= 0);
- set_length(Smi::FromInt(0), SKIP_WRITE_BARRIER);
+ set_length(Smi::FromInt(0));
FixedArray* new_elements;
if (capacity == 0) {
new_elements = Heap::empty_fixed_array();
@@ -5284,7 +5291,7 @@ Object* JSObject::SetElementsLength(Object* len) {
for (int i = value; i < old_length; i++) {
FixedArray::cast(elements())->set_the_hole(i);
}
- JSArray::cast(this)->set_length(smi_length, SKIP_WRITE_BARRIER);
+ JSArray::cast(this)->set_length(Smi::cast(smi_length));
}
return this;
}
@@ -5294,8 +5301,9 @@ Object* JSObject::SetElementsLength(Object* len) {
!ShouldConvertToSlowElements(new_capacity)) {
Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity);
if (obj->IsFailure()) return obj;
- if (IsJSArray()) JSArray::cast(this)->set_length(smi_length,
- SKIP_WRITE_BARRIER);
+ if (IsJSArray()) {
+ JSArray::cast(this)->set_length(Smi::cast(smi_length));
+ }
SetFastElements(FixedArray::cast(obj));
return this;
}
@@ -5314,7 +5322,7 @@ Object* JSObject::SetElementsLength(Object* len) {
static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
element_dictionary()->RemoveNumberEntries(value, old_length);
}
- JSArray::cast(this)->set_length(smi_length, SKIP_WRITE_BARRIER);
+ JSArray::cast(this)->set_length(Smi::cast(smi_length));
}
return this;
}
@@ -5339,8 +5347,7 @@ Object* JSObject::SetElementsLength(Object* len) {
Object* obj = Heap::AllocateFixedArray(1);
if (obj->IsFailure()) return obj;
FixedArray::cast(obj)->set(0, len);
- if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(1),
- SKIP_WRITE_BARRIER);
+ if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(1));
set_elements(FixedArray::cast(obj));
return this;
}
@@ -5610,8 +5617,7 @@ Object* JSObject::SetFastElement(uint32_t index, Object* value) {
CHECK(Array::IndexFromObject(JSArray::cast(this)->length(),
&array_length));
if (index >= array_length) {
- JSArray::cast(this)->set_length(Smi::FromInt(index + 1),
- SKIP_WRITE_BARRIER);
+ JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
}
}
return value;
@@ -5627,8 +5633,9 @@ Object* JSObject::SetFastElement(uint32_t index, Object* value) {
Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity);
if (obj->IsFailure()) return obj;
SetFastElements(FixedArray::cast(obj));
- if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(index + 1),
- SKIP_WRITE_BARRIER);
+ if (IsJSArray()) {
+ JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
+ }
FixedArray::cast(elements())->set(index, value);
return value;
}
@@ -6125,7 +6132,8 @@ template<typename Shape, typename Key>
void Dictionary<Shape, Key>::CopyValuesTo(FixedArray* elements) {
int pos = 0;
int capacity = HashTable<Shape, Key>::Capacity();
- WriteBarrierMode mode = elements->GetWriteBarrierMode();
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
for (int i = 0; i < capacity; i++) {
Object* k = Dictionary<Shape, Key>::KeyAt(i);
if (Dictionary<Shape, Key>::IsKey(k)) {
@@ -6496,7 +6504,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
for (int i = 0; i < length; i++) {
if (!FixedArray::cast(elements())->get(i)->IsTheHole()) {
if (storage != NULL) {
- storage->set(counter, Smi::FromInt(i), SKIP_WRITE_BARRIER);
+ storage->set(counter, Smi::FromInt(i));
}
counter++;
}
@@ -6508,7 +6516,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
int length = PixelArray::cast(elements())->length();
while (counter < length) {
if (storage != NULL) {
- storage->set(counter, Smi::FromInt(counter), SKIP_WRITE_BARRIER);
+ storage->set(counter, Smi::FromInt(counter));
}
counter++;
}
@@ -6525,7 +6533,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
int length = ExternalArray::cast(elements())->length();
while (counter < length) {
if (storage != NULL) {
- storage->set(counter, Smi::FromInt(counter), SKIP_WRITE_BARRIER);
+ storage->set(counter, Smi::FromInt(counter));
}
counter++;
}
@@ -6550,7 +6558,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
String* str = String::cast(val);
if (storage) {
for (int i = 0; i < str->length(); i++) {
- storage->set(counter + i, Smi::FromInt(i), SKIP_WRITE_BARRIER);
+ storage->set(counter + i, Smi::FromInt(i));
}
}
counter += str->length();
@@ -6882,8 +6890,10 @@ Object* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
Object* obj = Allocate(nof * 2);
if (obj->IsFailure()) return obj;
+
+ AssertNoAllocation no_gc;
HashTable* table = HashTable::cast(obj);
- WriteBarrierMode mode = table->GetWriteBarrierMode();
+ WriteBarrierMode mode = table->GetWriteBarrierMode(no_gc);
// Copy prefix to new array.
for (int i = kPrefixStartIndex;
@@ -7130,7 +7140,7 @@ Object* JSObject::PrepareElementsForSort(uint32_t limit) {
// Split elements into defined, undefined and the_hole, in that order.
// Only count locations for undefined and the hole, and fill them afterwards.
- WriteBarrierMode write_barrier = elements->GetWriteBarrierMode();
+ WriteBarrierMode write_barrier = elements->GetWriteBarrierMode(no_alloc);
unsigned int undefs = limit;
unsigned int holes = limit;
// Assume most arrays contain no holes and undefined values, so minimize the
@@ -7625,7 +7635,7 @@ Object* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
if (obj->IsFailure()) return obj;
FixedArray* iteration_order = FixedArray::cast(obj);
for (int i = 0; i < length; i++) {
- iteration_order->set(i, Smi::FromInt(i), SKIP_WRITE_BARRIER);
+ iteration_order->set(i, Smi::FromInt(i));
}
// Allocate array with enumeration order.
@@ -7638,9 +7648,7 @@ Object* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
int pos = 0;
for (int i = 0; i < capacity; i++) {
if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
- enumeration_order->set(pos++,
- Smi::FromInt(DetailsAt(i).index()),
- SKIP_WRITE_BARRIER);
+ enumeration_order->set(pos++, Smi::FromInt(DetailsAt(i).index()));
}
}
@@ -7651,9 +7659,7 @@ Object* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
for (int i = 0; i < length; i++) {
int index = Smi::cast(iteration_order->get(i))->value();
int enum_index = PropertyDetails::kInitialIndex + i;
- enumeration_order->set(index,
- Smi::FromInt(enum_index),
- SKIP_WRITE_BARRIER);
+ enumeration_order->set(index, Smi::FromInt(enum_index));
}
// Update the dictionary with new indices.
@@ -7801,8 +7807,7 @@ void NumberDictionary::UpdateMaxNumberKey(uint32_t key) {
Object* max_index_object = get(kMaxNumberKeyIndex);
if (!max_index_object->IsSmi() || max_number_key() < key) {
FixedArray::set(kMaxNumberKeyIndex,
- Smi::FromInt(key << kRequiresSlowElementsTagSize),
- SKIP_WRITE_BARRIER);
+ Smi::FromInt(key << kRequiresSlowElementsTagSize));
}
}
@@ -7893,9 +7898,7 @@ void StringDictionary::CopyEnumKeysTo(FixedArray* storage,
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted() || details.IsDontEnum()) continue;
storage->set(index, k);
- sort_array->set(index,
- Smi::FromInt(details.index()),
- SKIP_WRITE_BARRIER);
+ sort_array->set(index, Smi::FromInt(details.index()));
index++;
}
}
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 5d088e523..f6411965a 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -1023,8 +1023,12 @@ class HeapObject: public Object {
// Casting.
static inline HeapObject* cast(Object* obj);
- // Return the write barrier mode for this.
- inline WriteBarrierMode GetWriteBarrierMode();
+ // Return the write barrier mode for this. Callers of this function
+ // must be able to present a reference to an AssertNoAllocation
+ // object as a sign that they are not going to use this function
+ // from code that allocates and thus invalidates the returned write
+ // barrier mode.
+ inline WriteBarrierMode GetWriteBarrierMode(const AssertNoAllocation&);
// Dispatched behavior.
void HeapObjectShortPrint(StringStream* accumulator);
@@ -1669,7 +1673,8 @@ class FixedArray: public Array {
void SortPairs(FixedArray* numbers, uint32_t len);
protected:
- // Set operation on FixedArray without using write barriers.
+ // Set operation on FixedArray without using write barriers. Can
+ // only be used for storing old space objects or smis.
static inline void fast_set(FixedArray* array, int index, Object* value);
private:
@@ -2889,6 +2894,14 @@ class Map: public HeapObject {
return ((1 << kHasInstanceCallHandler) & bit_field()) != 0;
}
+ inline void set_is_extensible() {
+ set_bit_field2(bit_field2() | (1 << kIsExtensible));
+ }
+
+ inline bool is_extensible() {
+ return ((1 << kIsExtensible) & bit_field2()) != 0;
+ }
+
// Tells whether the instance needs security checks when accessing its
// properties.
inline void set_is_access_check_needed(bool access_check_needed);
@@ -3006,6 +3019,7 @@ class Map: public HeapObject {
// Bit positions for bit field 2
static const int kNeedsLoading = 0;
+ static const int kIsExtensible = 1;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
@@ -3213,8 +3227,8 @@ class SharedFunctionInfo: public HeapObject {
// this.x = y; where y is either a constant or refers to an argument.
inline bool has_only_simple_this_property_assignments();
- inline bool try_fast_codegen();
- inline void set_try_fast_codegen(bool flag);
+ inline bool try_full_codegen();
+ inline void set_try_full_codegen(bool flag);
// For functions which only contains this property assignments this provides
// access to the names for the properties assigned.
@@ -3295,7 +3309,7 @@ class SharedFunctionInfo: public HeapObject {
// Bit positions in compiler_hints.
static const int kHasOnlySimpleThisPropertyAssignments = 0;
- static const int kTryFastCodegen = 1;
+ static const int kTryFullCodegen = 1;
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
@@ -3640,6 +3654,8 @@ class JSRegExp: public JSObject {
FixedArray::kHeaderSize + kTagIndex * kPointerSize;
static const int kDataAsciiCodeOffset =
FixedArray::kHeaderSize + kIrregexpASCIICodeIndex * kPointerSize;
+ static const int kDataUC16CodeOffset =
+ FixedArray::kHeaderSize + kIrregexpUC16CodeIndex * kPointerSize;
static const int kIrregexpCaptureCountOffset =
FixedArray::kHeaderSize + kIrregexpCaptureCountIndex * kPointerSize;
};
@@ -4463,6 +4479,10 @@ class JSArray: public JSObject {
// [length]: The length property.
DECL_ACCESSORS(length, Object)
+ // Overload the length setter to skip write barrier when the length
+ // is set to a smi. This matches the set function on FixedArray.
+ inline void set_length(Smi* length);
+
Object* JSArrayUpdateLengthFromIndex(uint32_t index, Object* value);
// Initialize the array with the given capacity. The function may
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 4090a080f..b06d86f59 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -91,7 +91,7 @@ class PositionStack {
class Parser {
public:
Parser(Handle<Script> script, bool allow_natives_syntax,
- v8::Extension* extension, bool is_pre_parsing,
+ v8::Extension* extension, ParserMode is_pre_parsing,
ParserFactory* factory, ParserLog* log, ScriptDataImpl* pre_data);
virtual ~Parser() { }
@@ -112,6 +112,8 @@ class Parser {
FunctionLiteral* ParseLazy(Handle<String> source,
Handle<String> name,
int start_position, bool is_expression);
+ FunctionLiteral* ParseJson(Handle<String> source,
+ unibrow::CharacterStream* stream);
// The minimum number of contiguous assignment that will
// be treated as an initialization block. Benchmarks show that
@@ -202,7 +204,21 @@ class Parser {
Expression* ParseObjectLiteral(bool* ok);
Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
- // Decide if a property should be the object boilerplate.
+ // Populate the constant properties fixed array for a materialized object
+ // literal.
+ void BuildObjectLiteralConstantProperties(
+ ZoneList<ObjectLiteral::Property*>* properties,
+ Handle<FixedArray> constants,
+ bool* is_simple,
+ int* depth);
+
+ // Populate the literals fixed array for a materialized array literal.
+ void BuildArrayLiteralBoilerplateLiterals(ZoneList<Expression*>* properties,
+ Handle<FixedArray> constants,
+ bool* is_simple,
+ int* depth);
+
+ // Decide if a property should be in the object boilerplate.
bool IsBoilerplateProperty(ObjectLiteral::Property* property);
// If the expression is a literal, return the literal value;
// if the expression is a materialized literal and is simple return a
@@ -231,6 +247,7 @@ class Parser {
INLINE(Token::Value Next()) { return scanner_.Next(); }
INLINE(void Consume(Token::Value token));
void Expect(Token::Value token, bool* ok);
+ bool Check(Token::Value token);
void ExpectSemicolon(bool* ok);
// Get odd-ball literals.
@@ -277,6 +294,29 @@ class Parser {
Handle<String> type,
Vector< Handle<Object> > arguments);
+ // JSON is a subset of JavaScript, as specified in, e.g., the ECMAScript 5
+ // specification section 15.12.1 (and appendix A.8).
+  // The grammar is given in section 15.12.1.2 (and appendix A.8.2).
+
+ // Parse JSON input as a single JSON value.
+ Expression* ParseJson(bool* ok);
+
+ // Parse a single JSON value from input (grammar production JSONValue).
+ // A JSON value is either a (double-quoted) string literal, a number literal,
+ // one of "true", "false", or "null", or an object or array literal.
+ Expression* ParseJsonValue(bool* ok);
+ // Parse a JSON object literal (grammar production JSONObject).
+ // An object literal is a squiggly-braced and comma separated sequence
+ // (possibly empty) of key/value pairs, where the key is a JSON string
+  // literal, the value is a JSON value, and the two are separated by a colon.
+ // A JavaScript object also allows numbers and identifiers as keys.
+ Expression* ParseJsonObject(bool* ok);
+ // Parses a JSON array literal (grammar production JSONArray). An array
+ // literal is a square-bracketed and comma separated sequence (possibly empty)
+ // of JSON values.
+ // A JavaScript array allows leaving out values from the sequence.
+ Expression* ParseJsonArray(bool* ok);
+
friend class Target;
friend class TargetScope;
friend class LexicalScope;
@@ -983,7 +1023,7 @@ class AstBuildingParser : public Parser {
public:
AstBuildingParser(Handle<Script> script, bool allow_natives_syntax,
v8::Extension* extension, ScriptDataImpl* pre_data)
- : Parser(script, allow_natives_syntax, extension, false,
+ : Parser(script, allow_natives_syntax, extension, PARSE,
factory(), log(), pre_data) { }
virtual void ReportMessageAt(Scanner::Location loc, const char* message,
Vector<const char*> args);
@@ -1002,9 +1042,9 @@ class PreParser : public Parser {
public:
PreParser(Handle<Script> script, bool allow_natives_syntax,
v8::Extension* extension)
- : Parser(script, allow_natives_syntax, extension, true,
- factory(), recorder(), NULL)
- , factory_(true) { }
+ : Parser(script, allow_natives_syntax, extension, PREPARSE,
+ factory(), recorder(), NULL),
+ factory_(true) { }
virtual void ReportMessageAt(Scanner::Location loc, const char* message,
Vector<const char*> args);
virtual VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
@@ -1147,7 +1187,7 @@ class LexicalScope BASE_EMBEDDED {
Parser::Parser(Handle<Script> script,
bool allow_natives_syntax,
v8::Extension* extension,
- bool is_pre_parsing,
+ ParserMode is_pre_parsing,
ParserFactory* factory,
ParserLog* log,
ScriptDataImpl* pre_data)
@@ -1161,7 +1201,7 @@ Parser::Parser(Handle<Script> script,
extension_(extension),
factory_(factory),
log_(log),
- is_pre_parsing_(is_pre_parsing),
+ is_pre_parsing_(is_pre_parsing == PREPARSE),
pre_data_(pre_data) {
}
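ParserMode is evidently a two-value enum (PARSE and PREPARSE, judging from the call sites above), which makes construction self-documenting where a bare bool was ambiguous:

    // Before: Parser(script, natives, extension, true, ...)  -- true meaning what?
    Parser parser(script, allow_natives_syntax, extension, PREPARSE,
                  factory, recorder, NULL);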
@@ -1172,7 +1212,7 @@ bool Parser::PreParseProgram(Handle<String> source,
AssertNoZoneAllocation assert_no_zone_allocation;
AssertNoAllocation assert_no_allocation;
NoHandleAllocation no_handle_allocation;
- scanner_.Init(source, stream, 0);
+ scanner_.Init(source, stream, 0, JAVASCRIPT);
ASSERT(target_stack_ == NULL);
mode_ = PARSE_EAGERLY;
DummyScope top_scope;
@@ -1195,7 +1235,7 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
// Initialize parser state.
source->TryFlattenIfNotFlat();
- scanner_.Init(source, stream, 0);
+ scanner_.Init(source, stream, 0, JAVASCRIPT);
ASSERT(target_stack_ == NULL);
// Compute the parsing mode.
@@ -1254,7 +1294,7 @@ FunctionLiteral* Parser::ParseLazy(Handle<String> source,
SafeStringInputBuffer buffer(source.location());
// Initialize parser state.
- scanner_.Init(source, &buffer, start_position);
+ scanner_.Init(source, &buffer, start_position, JAVASCRIPT);
ASSERT(target_stack_ == NULL);
mode_ = PARSE_EAGERLY;
@@ -1290,6 +1330,55 @@ FunctionLiteral* Parser::ParseLazy(Handle<String> source,
return result;
}
+FunctionLiteral* Parser::ParseJson(Handle<String> source,
+ unibrow::CharacterStream* stream) {
+ CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
+
+ HistogramTimerScope timer(&Counters::parse);
+ Counters::total_parse_size.Increment(source->length());
+
+ // Initialize parser state.
+ source->TryFlattenIfNotFlat();
+ scanner_.Init(source, stream, 0, JSON);
+ ASSERT(target_stack_ == NULL);
+
+ FunctionLiteral* result = NULL;
+ Handle<String> no_name = factory()->EmptySymbol();
+
+ {
+ Scope* scope = factory()->NewScope(top_scope_, Scope::GLOBAL_SCOPE, false);
+ LexicalScope lexical_scope(this, scope);
+ TemporaryScope temp_scope(this);
+ bool ok = true;
+ Expression* expression = ParseJson(&ok);
+ if (ok) {
+ ZoneListWrapper<Statement> statement = factory()->NewList<Statement>(1);
+ statement.Add(new ExpressionStatement(expression));
+ result = NEW(FunctionLiteral(
+ no_name,
+ top_scope_,
+ statement.elements(),
+ temp_scope.materialized_literal_count(),
+ temp_scope.expected_property_count(),
+ temp_scope.only_simple_this_property_assignments(),
+ temp_scope.this_property_assignments(),
+ 0,
+ 0,
+ source->length(),
+ false));
+ } else if (scanner().stack_overflow()) {
+ Top::StackOverflow();
+ }
+ }
+
+ // Make sure the target stack is empty.
+ ASSERT(target_stack_ == NULL);
+
+ // If there was a syntax error we have to get rid of the AST
+ // and it is not safe to do so before the scope has been deleted.
+ if (result == NULL) zone_scope.DeleteOnExit();
+ return result;
+}
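A hedged driver sketch for the new entry point (hypothetical call site; the buffer setup mirrors ParseLazy above):

    Handle<String> source =
        Factory::NewStringFromAscii(CStrVector("{\"a\": [1, true]}"));
    SafeStringInputBuffer buffer(source.location());
    FunctionLiteral* lit = parser.ParseJson(source, &buffer);
    // On success, lit holds one ExpressionStatement wrapping the ObjectLiteral;
    // on a syntax error it is NULL.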
void Parser::ReportMessage(const char* type, Vector<const char*> args) {
Scanner::Location source_location = scanner_.location();
@@ -1868,8 +1957,10 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction());
const int literals = fun->NumberOfLiterals();
Handle<Code> code = Handle<Code>(fun->shared()->code());
+ Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
Handle<JSFunction> boilerplate =
Factory::NewFunctionBoilerplate(name, literals, code);
+ boilerplate->shared()->set_construct_stub(*construct_stub);
// Copy the function data to the boilerplate. Used by
// builtins.cc:HandleApiCall to perform argument type checks and to
@@ -3120,7 +3211,7 @@ DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) {
void Parser::ReportUnexpectedToken(Token::Value token) {
// We don't report stack overflows here, to avoid increasing the
// stack depth even further. Instead we report it after parsing is
- // over, in ParseProgram.
+ // over, in ParseProgram/ParseJson.
if (token == Token::ILLEGAL && scanner().stack_overflow())
return;
// Four of the tokens are treated specially
@@ -3260,6 +3351,33 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
}
+void Parser::BuildArrayLiteralBoilerplateLiterals(ZoneList<Expression*>* values,
+ Handle<FixedArray> literals,
+ bool* is_simple,
+ int* depth) {
+ // Fill in the literals.
+ // Accumulate output values in local variables.
+ bool is_simple_acc = true;
+ int depth_acc = 1;
+ for (int i = 0; i < values->length(); i++) {
+ MaterializedLiteral* m_literal = values->at(i)->AsMaterializedLiteral();
+ if (m_literal != NULL && m_literal->depth() >= depth_acc) {
+ depth_acc = m_literal->depth() + 1;
+ }
+ Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
+ if (boilerplate_value->IsUndefined()) {
+ literals->set_the_hole(i);
+ is_simple_acc = false;
+ } else {
+ literals->set(i, *boilerplate_value);
+ }
+ }
+
+ *is_simple = is_simple_acc;
+ *depth = depth_acc;
+}
+
+
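Two illustrative inputs for the accumulation above:

    // [1, [2, 3]]  ->  literals = {1, <[2,3] boilerplate>}; depth 2; is_simple true
    // [1, f()]     ->  literals = {1, the_hole};            depth 1; is_simple false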
Expression* Parser::ParseArrayLiteral(bool* ok) {
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
@@ -3362,6 +3480,43 @@ Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
}
+void Parser::BuildObjectLiteralConstantProperties(
+ ZoneList<ObjectLiteral::Property*>* properties,
+ Handle<FixedArray> constant_properties,
+ bool* is_simple,
+ int* depth) {
+ int position = 0;
+  // Accumulate the values in local variables and store them at the end.
+ bool is_simple_acc = true;
+ int depth_acc = 1;
+ for (int i = 0; i < properties->length(); i++) {
+ ObjectLiteral::Property* property = properties->at(i);
+ if (!IsBoilerplateProperty(property)) {
+ is_simple_acc = false;
+ continue;
+ }
+ MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
+ if (m_literal != NULL && m_literal->depth() >= depth_acc) {
+ depth_acc = m_literal->depth() + 1;
+ }
+
+ // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
+ // value for COMPUTED properties, the real value is filled in at
+ // runtime. The enumeration order is maintained.
+ Handle<Object> key = property->key()->handle();
+ Handle<Object> value = GetBoilerplateValue(property->value());
+ is_simple_acc = is_simple_acc && !value->IsUndefined();
+
+ // Add name, value pair to the fixed array.
+ constant_properties->set(position++, *key);
+ constant_properties->set(position++, *value);
+ }
+
+ *is_simple = is_simple_acc;
+ *depth = depth_acc;
+}
+
+
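The object variant stores a name/value pair per slot, in source enumeration order; a computed value keeps its key but gets an undefined placeholder and clears is_simple:

    // {a: 1, b: g()}  ->  constants = {"a", 1, "b", undefined}; depth 1; is_simple false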
Expression* Parser::ParseObjectLiteral(bool* ok) {
// ObjectLiteral ::
// '{' (
@@ -3452,32 +3607,13 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
Handle<FixedArray> constant_properties =
Factory::NewFixedArray(number_of_boilerplate_properties * 2, TENURED);
- int position = 0;
+
bool is_simple = true;
int depth = 1;
- for (int i = 0; i < properties.length(); i++) {
- ObjectLiteral::Property* property = properties.at(i);
- if (!IsBoilerplateProperty(property)) {
- is_simple = false;
- continue;
- }
- MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
- if (m_literal != NULL && m_literal->depth() + 1 > depth) {
- depth = m_literal->depth() + 1;
- }
-
- // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
- // value for COMPUTED properties, the real value is filled in at
- // runtime. The enumeration order is maintained.
- Handle<Object> key = property->key()->handle();
- Handle<Object> value = GetBoilerplateValue(property->value());
- is_simple = is_simple && !value->IsUndefined();
-
- // Add name, value pair to the fixed array.
- constant_properties->set(position++, *key);
- constant_properties->set(position++, *value);
- }
-
+ BuildObjectLiteralConstantProperties(properties.elements(),
+ constant_properties,
+ &is_simple,
+ &depth);
return new ObjectLiteral(constant_properties,
properties.elements(),
literal_index,
@@ -3718,6 +3854,16 @@ void Parser::Expect(Token::Value token, bool* ok) {
}
+bool Parser::Check(Token::Value token) {
+ Token::Value next = peek();
+ if (next == token) {
+ Consume(next);
+ return true;
+ }
+ return false;
+}
+
+
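Check is the non-failing sibling of Expect: it consumes the token only when it is next. That is exactly what the comma-separated JSON productions below need:

    do {
      values.Add(ParseJsonValue(CHECK_OK));
    } while (Check(Token::COMMA));  // consume ',' and continue, else stop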
void Parser::ExpectSemicolon(bool* ok) {
// Check for automatic semicolon insertion according to
// the rules given in ECMA-262, section 7.9, page 21.
@@ -3884,6 +4030,145 @@ Expression* Parser::NewThrowError(Handle<String> constructor,
scanner().location().beg_pos);
}
+// ----------------------------------------------------------------------------
+// JSON
+
+Expression* Parser::ParseJson(bool* ok) {
+ Expression* result = ParseJsonValue(CHECK_OK);
+ Expect(Token::EOS, CHECK_OK);
+ return result;
+}
+
+
+// Parse any JSON value.
+Expression* Parser::ParseJsonValue(bool* ok) {
+ Token::Value token = peek();
+ switch (token) {
+ case Token::STRING: {
+ Consume(Token::STRING);
+ int literal_length = scanner_.literal_length();
+ const char* literal_string = scanner_.literal_string();
+ if (literal_length == 0) {
+ return NEW(Literal(Factory::empty_string()));
+ }
+ Vector<const char> literal(literal_string, literal_length);
+ return NEW(Literal(Factory::NewStringFromUtf8(literal, TENURED)));
+ }
+ case Token::NUMBER: {
+ Consume(Token::NUMBER);
+ ASSERT(scanner_.literal_length() > 0);
+ double value = StringToDouble(scanner_.literal_string(),
+ NO_FLAGS, // Hex, octal or trailing junk.
+ OS::nan_value());
+ return NewNumberLiteral(value);
+ }
+ case Token::FALSE_LITERAL:
+ Consume(Token::FALSE_LITERAL);
+ return NEW(Literal(Factory::false_value()));
+ case Token::TRUE_LITERAL:
+ Consume(Token::TRUE_LITERAL);
+ return NEW(Literal(Factory::true_value()));
+ case Token::NULL_LITERAL:
+ Consume(Token::NULL_LITERAL);
+ return NEW(Literal(Factory::null_value()));
+ case Token::LBRACE: {
+ Expression* result = ParseJsonObject(CHECK_OK);
+ return result;
+ }
+ case Token::LBRACK: {
+ Expression* result = ParseJsonArray(CHECK_OK);
+ return result;
+ }
+ default:
+ *ok = false;
+ ReportUnexpectedToken(token);
+ return NULL;
+ }
+}
+
+
+// Parse a JSON object. Scanner must be right after '{' token.
+Expression* Parser::ParseJsonObject(bool* ok) {
+ Consume(Token::LBRACE);
+ ZoneListWrapper<ObjectLiteral::Property> properties =
+ factory()->NewList<ObjectLiteral::Property>(4);
+ int boilerplate_properties = 0;
+ if (peek() != Token::RBRACE) {
+ do {
+ Expect(Token::STRING, CHECK_OK);
+ Handle<String> key = factory()->LookupSymbol(scanner_.literal_string(),
+ scanner_.literal_length());
+ Expect(Token::COLON, CHECK_OK);
+ Expression* value = ParseJsonValue(CHECK_OK);
+ Literal* key_literal;
+ uint32_t index;
+ if (key->AsArrayIndex(&index)) {
+ key_literal = NewNumberLiteral(index);
+ } else {
+ key_literal = NEW(Literal(key));
+ }
+ ObjectLiteral::Property* property =
+ NEW(ObjectLiteral::Property(key_literal, value));
+ properties.Add(property);
+
+ if (IsBoilerplateProperty(property)) {
+ boilerplate_properties++;
+ }
+ } while (Check(Token::COMMA));
+ }
+ Expect(Token::RBRACE, CHECK_OK);
+
+ int literal_index = temp_scope_->NextMaterializedLiteralIndex();
+ if (is_pre_parsing_) return NULL;
+
+ Handle<FixedArray> constant_properties =
+ Factory::NewFixedArray(boilerplate_properties * 2, TENURED);
+ bool is_simple = true;
+ int depth = 1;
+ BuildObjectLiteralConstantProperties(properties.elements(),
+ constant_properties,
+ &is_simple,
+ &depth);
+ return new ObjectLiteral(constant_properties,
+ properties.elements(),
+ literal_index,
+ is_simple,
+ depth);
+}
+
+
+// Parse a JSON array. Scanner must be right after '[' token.
+Expression* Parser::ParseJsonArray(bool* ok) {
+ Consume(Token::LBRACK);
+
+ ZoneListWrapper<Expression> values = factory()->NewList<Expression>(4);
+ if (peek() != Token::RBRACK) {
+ do {
+ Expression* exp = ParseJsonValue(CHECK_OK);
+ values.Add(exp);
+ } while (Check(Token::COMMA));
+ }
+ Expect(Token::RBRACK, CHECK_OK);
+
+ // Update the scope information before the pre-parsing bailout.
+ int literal_index = temp_scope_->NextMaterializedLiteralIndex();
+
+ if (is_pre_parsing_) return NULL;
+
+ // Allocate a fixed array with all the literals.
+ Handle<FixedArray> literals =
+ Factory::NewFixedArray(values.length(), TENURED);
+
+ bool is_simple;
+ int depth;
+ BuildArrayLiteralBoilerplateLiterals(values.elements(),
+ literals,
+ &is_simple,
+ &depth);
+ return NEW(ArrayLiteral(literals, values.elements(),
+ literal_index, is_simple, depth));
+}
+
// ----------------------------------------------------------------------------
// Regular expressions
@@ -4759,7 +5044,8 @@ bool ParseRegExp(FlatStringReader* input,
FunctionLiteral* MakeAST(bool compile_in_global_context,
Handle<Script> script,
v8::Extension* extension,
- ScriptDataImpl* pre_data) {
+ ScriptDataImpl* pre_data,
+ bool is_json) {
bool allow_natives_syntax =
always_allow_natives_syntax ||
FLAG_allow_natives_syntax ||
@@ -4771,15 +5057,21 @@ FunctionLiteral* MakeAST(bool compile_in_global_context,
Vector<const char*> args = pre_data->BuildArgs();
parser.ReportMessageAt(loc, message, args);
DeleteArray(message);
- for (int i = 0; i < args.length(); i++)
+ for (int i = 0; i < args.length(); i++) {
DeleteArray(args[i]);
+ }
DeleteArray(args.start());
return NULL;
}
Handle<String> source = Handle<String>(String::cast(script->source()));
SafeStringInputBuffer input(source.location());
- FunctionLiteral* result = parser.ParseProgram(source,
- &input, compile_in_global_context);
+ FunctionLiteral* result;
+ if (is_json) {
+ ASSERT(compile_in_global_context);
+ result = parser.ParseJson(source, &input);
+ } else {
+ result = parser.ParseProgram(source, &input, compile_in_global_context);
+ }
return result;
}
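Since parser.h (below) gives is_json a default of false, existing callers compile unchanged and only the JSON path opts in. A hypothetical call site:

    FunctionLiteral* ast = MakeAST(true,   // compile_in_global_context
                                   script,
                                   NULL,   // extension
                                   NULL,   // pre_data
                                   true);  // is_json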
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index a67284c28..0f808d726 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -133,7 +133,8 @@ class ScriptDataImpl : public ScriptData {
FunctionLiteral* MakeAST(bool compile_in_global_context,
Handle<Script> script,
v8::Extension* extension,
- ScriptDataImpl* pre_data);
+ ScriptDataImpl* pre_data,
+ bool is_json = false);
ScriptDataImpl* PreParse(Handle<String> source,
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index ee0d7b8c2..ff7577685 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -73,12 +73,6 @@ double ceiling(double x) {
}
-double OS::nan_value() {
- // NAN from math.h is defined in C99 and not in POSIX.
- return NAN;
-}
-
-
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -579,17 +573,17 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
#if V8_HOST_ARCH_IA32
- sample.pc = mcontext.mc_eip;
- sample.sp = mcontext.mc_esp;
- sample.fp = mcontext.mc_ebp;
+ sample.pc = reinterpret_cast<Address>(mcontext.mc_eip);
+ sample.sp = reinterpret_cast<Address>(mcontext.mc_esp);
+ sample.fp = reinterpret_cast<Address>(mcontext.mc_ebp);
#elif V8_HOST_ARCH_X64
- sample.pc = mcontext.mc_rip;
- sample.sp = mcontext.mc_rsp;
- sample.fp = mcontext.mc_rbp;
+ sample.pc = reinterpret_cast<Address>(mcontext.mc_rip);
+ sample.sp = reinterpret_cast<Address>(mcontext.mc_rsp);
+ sample.fp = reinterpret_cast<Address>(mcontext.mc_rbp);
#elif V8_HOST_ARCH_ARM
- sample.pc = mcontext.mc_r15;
- sample.sp = mcontext.mc_r13;
- sample.fp = mcontext.mc_r11;
+ sample.pc = reinterpret_cast<Address>(mcontext.mc_r15);
+ sample.sp = reinterpret_cast<Address>(mcontext.mc_r13);
+ sample.fp = reinterpret_cast<Address>(mcontext.mc_r11);
#endif
active_sampler_->SampleStack(&sample);
}
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index 403bf42b7..005b1deb6 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -73,12 +73,6 @@ double ceiling(double x) {
}
-double OS::nan_value() {
- // NAN from math.h is defined in C99 and not in POSIX.
- return NAN;
-}
-
-
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -731,23 +725,23 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
#if V8_HOST_ARCH_IA32
- sample.pc = mcontext.gregs[REG_EIP];
- sample.sp = mcontext.gregs[REG_ESP];
- sample.fp = mcontext.gregs[REG_EBP];
+ sample.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
+ sample.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
+ sample.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
- sample.pc = mcontext.gregs[REG_RIP];
- sample.sp = mcontext.gregs[REG_RSP];
- sample.fp = mcontext.gregs[REG_RBP];
+ sample.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
+ sample.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
+ sample.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
- sample.pc = mcontext.gregs[R15];
- sample.sp = mcontext.gregs[R13];
- sample.fp = mcontext.gregs[R11];
+ sample.pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
+ sample.sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
+ sample.fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
#else
- sample.pc = mcontext.arm_pc;
- sample.sp = mcontext.arm_sp;
- sample.fp = mcontext.arm_fp;
+ sample.pc = reinterpret_cast<Address>(mcontext.arm_pc);
+ sample.sp = reinterpret_cast<Address>(mcontext.arm_sp);
+ sample.fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif
#endif
if (IsVmThread())
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index 0c7a8b816..e379ae226 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -86,12 +86,6 @@ double ceiling(double x) {
}
-double OS::nan_value() {
- // NAN from math.h is defined in C99 and not in POSIX.
- return NAN;
-}
-
-
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -583,9 +577,9 @@ class Sampler::PlatformData : public Malloced {
flavor,
reinterpret_cast<natural_t*>(&state),
&count) == KERN_SUCCESS) {
- sample.pc = state.REGISTER_FIELD(ip);
- sample.sp = state.REGISTER_FIELD(sp);
- sample.fp = state.REGISTER_FIELD(bp);
+ sample.pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
+ sample.sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
+ sample.fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
sampler_->SampleStack(&sample);
}
thread_resume(profiled_thread_);
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index c85861c43..62e600441 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -72,12 +72,6 @@ double ceiling(double x) {
}
-double OS::nan_value() {
- // NAN from math.h is defined in C99 and not in POSIX.
- return NAN;
-}
-
-
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index d42e34e66..89f4d983d 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -61,6 +61,13 @@ double modulo(double x, double y) {
return fmod(x, y);
}
+
+double OS::nan_value() {
+ // NAN from math.h is defined in C99 and not in POSIX.
+ return NAN;
+}
+
+
// ----------------------------------------------------------------------------
// POSIX date/time support.
//
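Hoisting nan_value into the shared POSIX file removes four identical copies (Linux, macOS, FreeBSD, OpenBSD) while leaving callers untouched, e.g. the JSON number path in parser.cc above:

    double value = StringToDouble(scanner_.literal_string(),
                                  NO_FLAGS,          // no hex, octal or junk
                                  OS::nan_value());  // now defined once, here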
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index d58ce5876..85c2c54cf 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -28,19 +28,23 @@
// Platform specific code for Solaris 10 goes here. For the POSIX compatible
// parts the implementation is in platform-posix.cc.
-#include <sys/stack.h> // for stack alignment
-#include <unistd.h> // getpagesize()
-#include <sys/mman.h> // mmap()
-#include <unistd.h> // usleep()
-#include <execinfo.h> // backtrace(), backtrace_symbols()
+#ifdef __sparc
+# error "V8 does not support the SPARC CPU architecture."
+#endif
+
+#include <sys/stack.h> // for stack alignment
+#include <unistd.h> // getpagesize(), usleep()
+#include <sys/mman.h> // mmap()
+#include <execinfo.h> // backtrace(), backtrace_symbols()
#include <pthread.h>
-#include <sched.h> // for sched_yield
+#include <sched.h> // for sched_yield
#include <semaphore.h>
#include <time.h>
-#include <sys/time.h> // gettimeofday(), timeradd()
+#include <sys/time.h> // gettimeofday(), timeradd()
#include <errno.h>
-#include <ieeefp.h> // finite()
-#include <signal.h> // sigemptyset(), etc
+#include <ieeefp.h> // finite()
+#include <signal.h> // sigemptyset(), etc
+
#undef MAP_TYPE
@@ -52,77 +56,12 @@
namespace v8 {
namespace internal {
-int isfinite(double x) {
- return finite(x) && !isnand(x);
-}
-
-} } // namespace v8::internal
-
-
-// Test for infinity - usually defined in math.h
-int isinf(double x) {
- fpclass_t fpc = fpclass(x);
- return (fpc == FP_NINF || fpc == FP_PINF);
-}
-
-
-// Test if x is less than y and both nominal - usually defined in math.h
-int isless(double x, double y) {
- return isnan(x) || isnan(y) ? 0 : x < y;
-}
-
-
-// Test if x is greater than y and both nominal - usually defined in math.h
-int isgreater(double x, double y) {
- return isnan(x) || isnan(y) ? 0 : x > y;
-}
-
-
-// Classify floating point number - usually defined in math.h
-int fpclassify(double x) {
- // Use the Solaris-specific fpclass() for classification.
- fpclass_t fpc = fpclass(x);
-
- switch (fpc) {
- case FP_PNORM:
- case FP_NNORM:
- return FP_NORMAL;
- case FP_PZERO:
- case FP_NZERO:
- return FP_ZERO;
- case FP_PDENORM:
- case FP_NDENORM:
- return FP_SUBNORMAL;
- case FP_PINF:
- case FP_NINF:
- return FP_INFINITE;
- default:
- // All cases should be covered by the code above.
- ASSERT(fpc == FP_QNAN || fpc == FP_SNAN);
- return FP_NAN;
- }
-}
-
-
-int signbit(double x) {
- // We need to take care of the special case of both positive
- // and negative versions of zero.
- if (x == 0)
- return fpclass(x) == FP_NZERO;
- else
- return x < 0;
-}
-
-
-namespace v8 {
-namespace internal {
// 0 is never a valid thread id on Solaris since the main thread is 1 and
// subsequent have their ids incremented from there
static const pthread_t kNoThread = (pthread_t) 0;
-// TODO: Test to see if ceil() is correct on Solaris.
double ceiling(double x) {
return ceil(x);
}
@@ -144,12 +83,6 @@ uint64_t OS::CpuFeaturesImpliedByPlatform() {
}
-double OS::nan_value() {
- static double NAN = __builtin_nan("0x0");
- return NAN;
-}
-
-
int OS::ActivationFrameAlignment() {
return STACK_ALIGN;
}
@@ -160,29 +93,17 @@ const char* OS::LocalTimezone(double time) {
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
- return tzname[0]; // the location of the timezone string on Solaris
+ return tzname[0]; // The location of the timezone string on Solaris.
}
double OS::LocalTimeOffset() {
- int days, hours, minutes;
- time_t tv = time(NULL);
-
- // on Solaris, struct tm does not contain a tm_gmtoff field...
- struct tm* loc = localtime(&tv);
- struct tm* utc = gmtime(&tv);
-
- // calulate the utc offset
- days = loc->tm_yday = utc->tm_yday;
- hours = ((days < -1 ? 24 : 1 < days ? -24 : days * 24) +
- loc->tm_hour - utc->tm_hour);
- minutes = hours * 60 + loc->tm_min - utc->tm_min;
-
- // don't include any daylight savings offset in local time
- if (loc->tm_isdst > 0) minutes -= 60;
-
- // the result is in milliseconds
- return static_cast<double>(minutes * 60 * msPerSecond);
+ // On Solaris, struct tm does not contain a tm_gmtoff field.
+ time_t utc = time(NULL);
+ ASSERT(utc != -1);
+ struct tm* loc = localtime(&utc);
+ ASSERT(loc != NULL);
+ return static_cast<double>((mktime(loc) - utc) * msPerSecond);
}
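
The rewritten OS::LocalTimeOffset above leans on mktime() because Solaris' struct tm has no tm_gmtoff field. For reference, the classic standalone form of this derivation feeds broken-down UTC to mktime(), which always interprets its argument as local time; the difference from the original time_t is then the local offset. A minimal sketch of that trick (standalone illustration, not part of the patch):

```cpp
#include <stdio.h>
#include <time.h>

int main() {
  time_t now = time(NULL);
  struct tm utc_tm = *gmtime(&now);   // broken-down UTC
  utc_tm.tm_isdst = -1;               // let mktime() determine DST itself
  time_t as_local = mktime(&utc_tm);  // reinterpret those fields as local time
  double offset_seconds = difftime(now, as_local);
  printf("local UTC offset: %.0f seconds\n", offset_seconds);
  return 0;
}
```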
@@ -209,7 +130,7 @@ bool OS::IsOutsideAllocatedSpace(void* address) {
size_t OS::AllocateAlignment() {
- return (size_t)getpagesize();
+ return static_cast<size_t>(getpagesize());
}
@@ -262,7 +183,7 @@ void OS::Sleep(int milliseconds) {
void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination
+ // Redirect to std abort to signal abnormal program termination.
abort();
}
@@ -307,9 +228,6 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- UNIMPLEMENTED();
-#endif
}
@@ -610,6 +528,9 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
if (active_sampler_ == NULL) return;
TickSample sample;
+ sample.pc = 0;
+ sample.sp = 0;
+ sample.fp = 0;
// We always sample the VM state.
sample.state = Logger::state();
@@ -683,4 +604,4 @@ void Sampler::Stop() {
#endif // ENABLE_LOGGING_AND_PROFILING
-} } // namespace v8::internal
+} } // namespace v8::internal
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index 1be4b77f8..81b0d4c12 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -1813,13 +1813,13 @@ class Sampler::PlatformData : public Malloced {
context.ContextFlags = CONTEXT_FULL;
if (GetThreadContext(profiled_thread_, &context) != 0) {
#if V8_HOST_ARCH_X64
- sample.pc = context.Rip;
- sample.sp = context.Rsp;
- sample.fp = context.Rbp;
+ sample.pc = reinterpret_cast<Address>(context.Rip);
+ sample.sp = reinterpret_cast<Address>(context.Rsp);
+ sample.fp = reinterpret_cast<Address>(context.Rbp);
#else
- sample.pc = context.Eip;
- sample.sp = context.Esp;
- sample.fp = context.Ebp;
+ sample.pc = reinterpret_cast<Address>(context.Eip);
+ sample.sp = reinterpret_cast<Address>(context.Esp);
+ sample.fp = reinterpret_cast<Address>(context.Ebp);
#endif
sampler_->SampleStack(&sample);
}
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index 861a48a94..bc2e9d64f 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -44,21 +44,13 @@
#ifndef V8_PLATFORM_H_
#define V8_PLATFORM_H_
-#define V8_INFINITY INFINITY
-
#ifdef __sun
+// On Solaris, to get isinf, INFINITY, fpclassify and other macros one needs
+// to define this symbol.
+#define __C99FEATURES__ 1
+#endif
-namespace v8 {
-namespace internal {
-int isfinite(double x);
-} }
-int isinf(double x);
-int isless(double x, double y);
-int isgreater(double x, double y);
-int fpclassify(double x);
-int signbit(double x);
-
-#endif // __sun
+#define V8_INFINITY INFINITY
// Windows specific stuff.
#ifdef WIN32
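
Defining __C99FEATURES__ lets <math.h> on Solaris supply isinf, fpclassify, signbit and the other C99 classification macros, which is why the hand-rolled fpclass()-based implementations could be deleted from platform-solaris.cc above. A standalone sketch of the macros this unlocks (standard C99, nothing V8-specific):

```cpp
#include <math.h>
#include <stdio.h>

int main() {
  double values[] = { 1.5, -0.0, INFINITY, NAN };
  for (int i = 0; i < 4; i++) {
    double x = values[i];
    printf("%g: isfinite=%d isinf=%d isnan=%d signbit=%d class=%d\n",
           x, isfinite(x) ? 1 : 0, isinf(x) ? 1 : 0, isnan(x) ? 1 : 0,
           signbit(x) ? 1 : 0, fpclassify(x));
  }
  return 0;
}
```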
@@ -520,11 +512,18 @@ class Socket {
// TickSample captures the information collected for each sample.
class TickSample {
public:
- TickSample() : pc(0), sp(0), fp(0), state(OTHER), frames_count(0) {}
- uintptr_t pc; // Instruction pointer.
- uintptr_t sp; // Stack pointer.
- uintptr_t fp; // Frame pointer.
- StateTag state; // The state of the VM.
+ TickSample()
+ : pc(NULL),
+ sp(NULL),
+ fp(NULL),
+ function(NULL),
+ state(OTHER),
+ frames_count(0) {}
+ Address pc; // Instruction pointer.
+ Address sp; // Stack pointer.
+ Address fp; // Frame pointer.
+ Address function; // The last called JS function.
+ StateTag state; // The state of the VM.
static const int kMaxFramesCount = 100;
EmbeddedVector<Address, kMaxFramesCount> stack; // Call stack.
int frames_count; // Number of captured frames.
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 9ef727026..ca570a648 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -594,12 +594,22 @@ class IndentedScope BASE_EMBEDDED {
ast_printer_->inc_indent();
}
- explicit IndentedScope(const char* txt, StaticType* type = NULL) {
+ explicit IndentedScope(const char* txt, AstNode* node = NULL) {
ast_printer_->PrintIndented(txt);
- if ((type != NULL) && (type->IsKnown())) {
- ast_printer_->Print(" (type = ");
- ast_printer_->Print(StaticType::Type2String(type));
- ast_printer_->Print(")");
+ if (node != NULL && node->AsExpression() != NULL) {
+ Expression* expr = node->AsExpression();
+ bool printed_first = false;
+ if ((expr->type() != NULL) && (expr->type()->IsKnown())) {
+ ast_printer_->Print(" (type = ");
+ ast_printer_->Print(StaticType::Type2String(expr->type()));
+ printed_first = true;
+ }
+ if (expr->num() != Expression::kNoLabel) {
+ ast_printer_->Print(printed_first ? ", num = " : " (num = ");
+ ast_printer_->Print("%d", expr->num());
+ printed_first = true;
+ }
+ if (printed_first) ast_printer_->Print(")");
}
ast_printer_->Print("\n");
ast_printer_->inc_indent();
@@ -657,19 +667,22 @@ void AstPrinter::PrintLiteralIndented(const char* info,
void AstPrinter::PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value,
- StaticType* type) {
+ StaticType* type,
+ int num) {
if (var == NULL) {
PrintLiteralIndented(info, value, true);
} else {
EmbeddedVector<char, 256> buf;
+ int pos = OS::SNPrintF(buf, "%s (mode = %s", info,
+ Variable::Mode2String(var->mode()));
if (type->IsKnown()) {
- OS::SNPrintF(buf, "%s (mode = %s, type = %s)", info,
- Variable::Mode2String(var->mode()),
- StaticType::Type2String(type));
- } else {
- OS::SNPrintF(buf, "%s (mode = %s)", info,
- Variable::Mode2String(var->mode()));
+ pos += OS::SNPrintF(buf + pos, ", type = %s",
+ StaticType::Type2String(type));
+ }
+ if (num != Expression::kNoLabel) {
+ pos += OS::SNPrintF(buf + pos, ", num = %d", num);
}
+ OS::SNPrintF(buf + pos, ")");
PrintLiteralIndented(buf.start(), value, true);
}
}
@@ -692,7 +705,7 @@ void AstPrinter::PrintLabelsIndented(const char* info, ZoneStringList* labels) {
void AstPrinter::PrintIndentedVisit(const char* s, AstNode* node) {
- IndentedScope indent(s);
+ IndentedScope indent(s, node);
Visit(node);
}
@@ -726,7 +739,8 @@ void AstPrinter::PrintParameters(Scope* scope) {
for (int i = 0; i < scope->num_parameters(); i++) {
PrintLiteralWithModeIndented("VAR", scope->parameter(i),
scope->parameter(i)->name(),
- scope->parameter(i)->type());
+ scope->parameter(i)->type(),
+ Expression::kNoLabel);
}
}
}
@@ -771,7 +785,8 @@ void AstPrinter::VisitDeclaration(Declaration* node) {
PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
node->proxy()->AsVariable(),
node->proxy()->name(),
- node->proxy()->AsVariable()->type());
+ node->proxy()->AsVariable()->type(),
+ Expression::kNoLabel);
} else {
// function declarations
PrintIndented("FUNCTION ");
@@ -1007,7 +1022,7 @@ void AstPrinter::VisitSlot(Slot* node) {
void AstPrinter::VisitVariableProxy(VariableProxy* node) {
PrintLiteralWithModeIndented("VAR PROXY", node->AsVariable(), node->name(),
- node->type());
+ node->type(), node->num());
Variable* var = node->var();
if (var != NULL && var->rewrite() != NULL) {
IndentedScope indent;
@@ -1017,7 +1032,7 @@ void AstPrinter::VisitVariableProxy(VariableProxy* node) {
void AstPrinter::VisitAssignment(Assignment* node) {
- IndentedScope indent(Token::Name(node->op()), node->type());
+ IndentedScope indent(Token::Name(node->op()), node);
Visit(node->target());
Visit(node->value());
}
@@ -1029,7 +1044,7 @@ void AstPrinter::VisitThrow(Throw* node) {
void AstPrinter::VisitProperty(Property* node) {
- IndentedScope indent("PROPERTY");
+ IndentedScope indent("PROPERTY", node);
Visit(node->obj());
Literal* literal = node->key()->AsLiteral();
if (literal != NULL && literal->handle()->IsSymbol()) {
@@ -1082,14 +1097,14 @@ void AstPrinter::VisitCountOperation(CountOperation* node) {
void AstPrinter::VisitBinaryOperation(BinaryOperation* node) {
- IndentedScope indent(Token::Name(node->op()), node->type());
+ IndentedScope indent(Token::Name(node->op()), node);
Visit(node->left());
Visit(node->right());
}
void AstPrinter::VisitCompareOperation(CompareOperation* node) {
- IndentedScope indent(Token::Name(node->op()), node->type());
+ IndentedScope indent(Token::Name(node->op()), node);
Visit(node->left());
Visit(node->right());
}
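
The new PrintLiteralWithModeIndented body builds its annotation incrementally: each OS::SNPrintF call writes at buf + pos and returns the number of characters added, so the optional ", type = ..." and ", num = ..." fragments chain naturally. The same pattern with plain snprintf, as a standalone sketch (the literal values are illustrative stand-ins, not V8's):

```cpp
#include <stdio.h>

int main() {
  char buf[256];
  int pos = snprintf(buf, sizeof(buf), "%s (mode = %s", "VAR PROXY", "VAR");
  bool type_known = true;  // stand-in for type->IsKnown()
  int num = 3;             // stand-in for the Expression::kNoLabel check
  if (type_known)
    pos += snprintf(buf + pos, sizeof(buf) - pos, ", type = %s", "Smi");
  if (num >= 0)
    pos += snprintf(buf + pos, sizeof(buf) - pos, ", num = %d", num);
  snprintf(buf + pos, sizeof(buf) - pos, ")");
  printf("%s\n", buf);  // prints: VAR PROXY (mode = VAR, type = Smi, num = 3)
  return 0;
}
```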
diff --git a/deps/v8/src/prettyprinter.h b/deps/v8/src/prettyprinter.h
index dfff49a45..8e958c77e 100644
--- a/deps/v8/src/prettyprinter.h
+++ b/deps/v8/src/prettyprinter.h
@@ -102,7 +102,8 @@ class AstPrinter: public PrettyPrinter {
void PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value,
- StaticType* type);
+ StaticType* type,
+ int num);
void PrintLabelsIndented(const char* info, ZoneStringList* labels);
void inc_indent() { indent_++; }
diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp-macro-assembler.cc
index c73e02a8e..0fcfc33d7 100644
--- a/deps/v8/src/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp-macro-assembler.cc
@@ -122,7 +122,10 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
bool is_ascii = subject->IsAsciiRepresentation();
+  // The string has been flattened, so if it is a cons string it contains the
+  // full string in the first part.
if (StringShape(subject_ptr).IsCons()) {
+ ASSERT_EQ(0, ConsString::cast(subject_ptr)->second()->length());
subject_ptr = ConsString::cast(subject_ptr)->first();
}
// Ensure that an underlying string has the same ascii-ness.
@@ -141,8 +144,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
start_offset,
input_start,
input_end,
- offsets_vector,
- previous_index == 0);
+ offsets_vector);
return res;
}
@@ -153,14 +155,11 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
int start_offset,
const byte* input_start,
const byte* input_end,
- int* output,
- bool at_start) {
+ int* output) {
typedef int (*matcher)(String*, int, const byte*,
- const byte*, int*, int, Address, int);
+ const byte*, int*, Address, int);
matcher matcher_func = FUNCTION_CAST<matcher>(code->entry());
- int at_start_val = at_start ? 1 : 0;
-
// Ensure that the minimum stack has been allocated.
RegExpStack stack;
Address stack_base = RegExpStack::stack_base();
@@ -172,7 +171,6 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
input_start,
input_end,
output,
- at_start_val,
stack_base,
direct_call);
ASSERT(result <= SUCCESS);
@@ -189,6 +187,30 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
static unibrow::Mapping<unibrow::Ecma262Canonicalize> canonicalize;
+
+byte NativeRegExpMacroAssembler::word_character_map[] = {
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // '0' - '7'
+ 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // '8' - '9'
+
+ 0x00u, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'A' - 'G'
+ 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'H' - 'O'
+ 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'P' - 'W'
+ 0xffu, 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0xffu, // 'X' - 'Z', '_'
+
+ 0x00u, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'a' - 'g'
+ 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'h' - 'o'
+ 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'p' - 'w'
+ 0xffu, 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // 'x' - 'z'
+};
+
+
int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
Address byte_offset1,
Address byte_offset2,
diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h
index 7cc95110e..105d8cc48 100644
--- a/deps/v8/src/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp-macro-assembler.h
@@ -204,13 +204,21 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
static const byte* StringCharacterPosition(String* subject, int start_index);
+ // Byte map of ASCII characters with a 0xff if the character is a word
+ // character (digit, letter or underscore) and 0x00 otherwise.
+ // Used by generated RegExp code.
+ static byte word_character_map[128];
+
+ static Address word_character_map_address() {
+ return &word_character_map[0];
+ }
+
static Result Execute(Code* code,
String* input,
int start_offset,
const byte* input_start,
const byte* input_end,
- int* output,
- bool at_start);
+ int* output);
};
#endif // V8_NATIVE_REGEXP
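
The table above lets generated regexp code answer "is this a \w character?" with a single indexed load instead of a chain of range comparisons. A standalone sketch of the equivalent lookup (the constants mirror the map's contents; this is an illustration, not V8 code):

```cpp
#include <stdio.h>

typedef unsigned char byte;

static byte word_character_map[128];

static void InitMap() {
  for (int c = 0; c < 128; c++) {
    bool is_word = (c >= '0' && c <= '9') ||
                   (c >= 'A' && c <= 'Z') ||
                   (c >= 'a' && c <= 'z') ||
                   c == '_';
    word_character_map[c] = is_word ? 0xff : 0x00;
  }
}

static bool IsWordCharacter(int c) {
  // Characters outside ASCII are never word characters for \w.
  return c < 128 && word_character_map[c] != 0;
}

int main() {
  InitMap();
  printf("%d %d %d\n", IsWordCharacter('a'), IsWordCharacter('_'),
         IsWordCharacter('-'));  // prints: 1 1 0
  return 0;
}
```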
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 3a6e176cb..515343b7b 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -107,25 +107,23 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
// Deep copy local properties.
if (copy->HasFastProperties()) {
FixedArray* properties = copy->properties();
- WriteBarrierMode mode = properties->GetWriteBarrierMode();
for (int i = 0; i < properties->length(); i++) {
Object* value = properties->get(i);
if (value->IsJSObject()) {
- JSObject* jsObject = JSObject::cast(value);
- result = DeepCopyBoilerplate(jsObject);
+ JSObject* js_object = JSObject::cast(value);
+ result = DeepCopyBoilerplate(js_object);
if (result->IsFailure()) return result;
- properties->set(i, result, mode);
+ properties->set(i, result);
}
}
- mode = copy->GetWriteBarrierMode();
int nof = copy->map()->inobject_properties();
for (int i = 0; i < nof; i++) {
Object* value = copy->InObjectPropertyAt(i);
if (value->IsJSObject()) {
- JSObject* jsObject = JSObject::cast(value);
- result = DeepCopyBoilerplate(jsObject);
+ JSObject* js_object = JSObject::cast(value);
+ result = DeepCopyBoilerplate(js_object);
if (result->IsFailure()) return result;
- copy->InObjectPropertyAtPut(i, result, mode);
+ copy->InObjectPropertyAtPut(i, result);
}
}
} else {
@@ -135,20 +133,20 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
copy->GetLocalPropertyNames(names, 0);
for (int i = 0; i < names->length(); i++) {
ASSERT(names->get(i)->IsString());
- String* keyString = String::cast(names->get(i));
+ String* key_string = String::cast(names->get(i));
PropertyAttributes attributes =
- copy->GetLocalPropertyAttribute(keyString);
+ copy->GetLocalPropertyAttribute(key_string);
// Only deep copy fields from the object literal expression.
// In particular, don't try to copy the length attribute of
// an array.
if (attributes != NONE) continue;
- Object* value = copy->GetProperty(keyString, &attributes);
+ Object* value = copy->GetProperty(key_string, &attributes);
ASSERT(!value->IsFailure());
if (value->IsJSObject()) {
- JSObject* jsObject = JSObject::cast(value);
- result = DeepCopyBoilerplate(jsObject);
+ JSObject* js_object = JSObject::cast(value);
+ result = DeepCopyBoilerplate(js_object);
if (result->IsFailure()) return result;
- result = copy->SetProperty(keyString, result, NONE);
+ result = copy->SetProperty(key_string, result, NONE);
if (result->IsFailure()) return result;
}
}
@@ -160,14 +158,13 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
switch (copy->GetElementsKind()) {
case JSObject::FAST_ELEMENTS: {
FixedArray* elements = FixedArray::cast(copy->elements());
- WriteBarrierMode mode = elements->GetWriteBarrierMode();
for (int i = 0; i < elements->length(); i++) {
Object* value = elements->get(i);
if (value->IsJSObject()) {
- JSObject* jsObject = JSObject::cast(value);
- result = DeepCopyBoilerplate(jsObject);
+ JSObject* js_object = JSObject::cast(value);
+ result = DeepCopyBoilerplate(js_object);
if (result->IsFailure()) return result;
- elements->set(i, result, mode);
+ elements->set(i, result);
}
}
break;
@@ -180,8 +177,8 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
if (element_dictionary->IsKey(k)) {
Object* value = element_dictionary->ValueAt(i);
if (value->IsJSObject()) {
- JSObject* jsObject = JSObject::cast(value);
- result = DeepCopyBoilerplate(jsObject);
+ JSObject* js_object = JSObject::cast(value);
+ result = DeepCopyBoilerplate(js_object);
if (result->IsFailure()) return result;
element_dictionary->ValueAtPut(i, result);
}
@@ -583,6 +580,7 @@ static void GetOwnPropertyImplementation(JSObject* obj,
// if args[1] is an accessor on args[0]
// [true, GetFunction, SetFunction, Enumerable, Configurable]
static Object* Runtime_GetOwnProperty(Arguments args) {
+ ASSERT(args.length() == 2);
HandleScope scope;
Handle<FixedArray> elms = Factory::NewFixedArray(5);
Handle<JSArray> desc = Factory::NewJSArrayWithElements(elms);
@@ -626,6 +624,14 @@ static Object* Runtime_GetOwnProperty(Arguments args) {
}
+static Object* Runtime_IsExtensible(Arguments args) {
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(JSObject, obj, args[0]);
+ return obj->map()->is_extensible() ? Heap::true_value()
+ : Heap::false_value();
+}
+
+
static Object* Runtime_RegExpCompile(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 3);
@@ -1396,16 +1402,18 @@ static Object* Runtime_SetCode(Arguments args) {
if (!code->IsNull()) {
RUNTIME_ASSERT(code->IsJSFunction());
Handle<JSFunction> fun = Handle<JSFunction>::cast(code);
- SetExpectedNofProperties(target, fun->shared()->expected_nof_properties());
- if (!fun->is_compiled() && !CompileLazy(fun, KEEP_EXCEPTION)) {
+ Handle<SharedFunctionInfo> shared(fun->shared());
+ SetExpectedNofProperties(target, shared->expected_nof_properties());
+
+ if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
return Failure::Exception();
}
// Set the code, formal parameter count, and the length of the target
// function.
target->set_code(fun->code());
- target->shared()->set_length(fun->shared()->length());
+ target->shared()->set_length(shared->length());
target->shared()->set_formal_parameter_count(
- fun->shared()->formal_parameter_count());
+ shared->formal_parameter_count());
// Set the source code of the target function to undefined.
// SetCode is only used for built-in constructors like String,
// Array, and Object, and some web code
@@ -1428,6 +1436,8 @@ static Object* Runtime_SetCode(Arguments args) {
literals->set(JSFunction::kLiteralGlobalContextIndex,
context->global_context());
}
+ // It's okay to skip the write barrier here because the literals
+ // are guaranteed to be in old space.
target->set_literals(*literals, SKIP_WRITE_BARRIER);
}
@@ -1479,7 +1489,11 @@ static Object* Runtime_StringCharAt(Arguments args) {
CONVERT_CHECKED(String, subject, args[0]);
Object* index = args[1];
- return CharFromCode(CharCodeAt(subject, index));
+ Object* code = CharCodeAt(subject, index);
+ if (code == Heap::nan_value()) {
+ return Heap::undefined_value();
+ }
+ return CharFromCode(code);
}
@@ -3209,6 +3223,170 @@ static Object* Runtime_GetPropertyNamesFast(Arguments args) {
}
+// Find the length of the prototype chain that is to be handled as one. If a
+// prototype object is hidden it is to be viewed as part of the object it
+// is the prototype for.
+static int LocalPrototypeChainLength(JSObject* obj) {
+ int count = 1;
+ Object* proto = obj->GetPrototype();
+ while (proto->IsJSObject() &&
+ JSObject::cast(proto)->map()->is_hidden_prototype()) {
+ count++;
+ proto = JSObject::cast(proto)->GetPrototype();
+ }
+ return count;
+}
+
+
+// Return the names of the local named properties.
+// args[0]: object
+static Object* Runtime_GetLocalPropertyNames(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ if (!args[0]->IsJSObject()) {
+ return Heap::undefined_value();
+ }
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+ // Skip the global proxy as it has no properties and always delegates to the
+ // real global object.
+ if (obj->IsJSGlobalProxy()) {
+ // Only collect names if access is permitted.
+ if (obj->IsAccessCheckNeeded() &&
+ !Top::MayNamedAccess(*obj, Heap::undefined_value(), v8::ACCESS_KEYS)) {
+ Top::ReportFailedAccessCheck(*obj, v8::ACCESS_KEYS);
+ return *Factory::NewJSArray(0);
+ }
+ obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
+ }
+
+  // Find the number of objects making up this object.
+ int length = LocalPrototypeChainLength(*obj);
+
+ // Find the number of local properties for each of the objects.
+ int* local_property_count = NewArray<int>(length);
+ int total_property_count = 0;
+ Handle<JSObject> jsproto = obj;
+ for (int i = 0; i < length; i++) {
+ // Only collect names if access is permitted.
+ if (jsproto->IsAccessCheckNeeded() &&
+ !Top::MayNamedAccess(*jsproto,
+ Heap::undefined_value(),
+ v8::ACCESS_KEYS)) {
+ Top::ReportFailedAccessCheck(*jsproto, v8::ACCESS_KEYS);
+ return *Factory::NewJSArray(0);
+ }
+ int n;
+ n = jsproto->NumberOfLocalProperties(static_cast<PropertyAttributes>(NONE));
+ local_property_count[i] = n;
+ total_property_count += n;
+ if (i < length - 1) {
+ jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
+ }
+ }
+
+ // Allocate an array with storage for all the property names.
+ Handle<FixedArray> names = Factory::NewFixedArray(total_property_count);
+
+ // Get the property names.
+ jsproto = obj;
+ int proto_with_hidden_properties = 0;
+ for (int i = 0; i < length; i++) {
+ jsproto->GetLocalPropertyNames(*names,
+ i == 0 ? 0 : local_property_count[i - 1]);
+ if (!GetHiddenProperties(jsproto, false)->IsUndefined()) {
+ proto_with_hidden_properties++;
+ }
+ if (i < length - 1) {
+ jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
+ }
+ }
+
+  // Filter out the name of the hidden properties object.
+ if (proto_with_hidden_properties > 0) {
+ Handle<FixedArray> old_names = names;
+ names = Factory::NewFixedArray(
+ names->length() - proto_with_hidden_properties);
+ int dest_pos = 0;
+ for (int i = 0; i < total_property_count; i++) {
+ Object* name = old_names->get(i);
+ if (name == Heap::hidden_symbol()) {
+ continue;
+ }
+ names->set(dest_pos++, name);
+ }
+ }
+
+ DeleteArray(local_property_count);
+ return *Factory::NewJSArrayWithElements(names);
+}
+
+
+// Return the names of the local indexed properties.
+// args[0]: object
+static Object* Runtime_GetLocalElementNames(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ if (!args[0]->IsJSObject()) {
+ return Heap::undefined_value();
+ }
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+ int n = obj->NumberOfLocalElements(static_cast<PropertyAttributes>(NONE));
+ Handle<FixedArray> names = Factory::NewFixedArray(n);
+ obj->GetLocalElementKeys(*names, static_cast<PropertyAttributes>(NONE));
+ return *Factory::NewJSArrayWithElements(names);
+}
+
+
+// Return information on whether an object has a named or indexed interceptor.
+// args[0]: object
+static Object* Runtime_GetInterceptorInfo(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ if (!args[0]->IsJSObject()) {
+ return Smi::FromInt(0);
+ }
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+ int result = 0;
+ if (obj->HasNamedInterceptor()) result |= 2;
+ if (obj->HasIndexedInterceptor()) result |= 1;
+
+ return Smi::FromInt(result);
+}
+
+
+// Return property names from named interceptor.
+// args[0]: object
+static Object* Runtime_GetNamedInterceptorPropertyNames(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+ if (obj->HasNamedInterceptor()) {
+ v8::Handle<v8::Array> result = GetKeysForNamedInterceptor(obj, obj);
+ if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
+ }
+ return Heap::undefined_value();
+}
+
+
+// Return element names from indexed interceptor.
+// args[0]: object
+static Object* Runtime_GetIndexedInterceptorElementNames(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+ if (obj->HasIndexedInterceptor()) {
+ v8::Handle<v8::Array> result = GetKeysForIndexedInterceptor(obj, obj);
+ if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
+ }
+ return Heap::undefined_value();
+}
+
+
static Object* Runtime_LocalKeys(Arguments args) {
ASSERT_EQ(args.length(), 1);
CONVERT_CHECKED(JSObject, raw_object, args[0]);
@@ -4498,7 +4676,9 @@ static Object* Runtime_Math_round(Arguments args) {
CONVERT_DOUBLE_CHECKED(x, args[0]);
if (signbit(x) && x >= -0.5) return Heap::minus_zero_value();
- return Heap::NumberFromDouble(floor(x + 0.5));
+ double integer = ceil(x);
+ if (integer - x > 0.5) { integer -= 1.0; }
+ return Heap::NumberFromDouble(integer);
}
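
Runtime_Math_round now avoids floor(x + 0.5): when x + 0.5 is not exactly representable, the addition itself rounds (ties-to-even) and can shift the result by one. Computing ceil(x) and stepping back when the gap exceeds 0.5 never performs a lossy addition. A standalone check of the difference (not V8 code):

```cpp
#include <math.h>
#include <stdio.h>

static double RoundViaCeil(double x) {
  double integer = ceil(x);
  if (integer - x > 0.5) integer -= 1.0;
  return integer;
}

int main() {
  // 2^52 + 1: above 2^52 doubles are spaced 1 apart, so x + 0.5 is not
  // representable and rounds (ties-to-even) up to 2^52 + 2.
  double x = 4503599627370497.0;
  printf("floor(x + 0.5)  = %.0f\n", floor(x + 0.5));   // 4503599627370498 (off by one)
  printf("RoundViaCeil(x) = %.0f\n", RoundViaCeil(x));  // 4503599627370497
  return 0;
}
```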
@@ -4552,7 +4732,9 @@ static Object* Runtime_NewArguments(Arguments args) {
if (obj->IsFailure()) return obj;
FixedArray* array = FixedArray::cast(obj);
ASSERT(array->length() == length);
- WriteBarrierMode mode = array->GetWriteBarrierMode();
+
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
for (int i = 0; i < length; i++) {
array->set(i, frame->GetParameter(i), mode);
}
@@ -4577,10 +4759,13 @@ static Object* Runtime_NewArgumentsFast(Arguments args) {
// Allocate the fixed array.
Object* obj = Heap::AllocateRawFixedArray(length);
if (obj->IsFailure()) return obj;
+
+ AssertNoAllocation no_gc;
reinterpret_cast<Array*>(obj)->set_map(Heap::fixed_array_map());
FixedArray* array = FixedArray::cast(obj);
array->set_length(length);
- WriteBarrierMode mode = array->GetWriteBarrierMode();
+
+ WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
for (int i = 0; i < length; i++) {
array->set(i, *--parameters, mode);
}
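
GetWriteBarrierMode now takes an AssertNoAllocation scope, so a cached SKIP_WRITE_BARRIER answer cannot silently outlive a GC that could move the array. The idiom (passing a scope object as proof of a precondition) in a standalone illustration; these are simplified stand-ins, not V8's actual types:

```cpp
#include <stdio.h>

class AssertNoAllocation {
  // In a debug build this could set a thread-local "allocation forbidden"
  // flag in its constructor and clear it in its destructor.
};

enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

// Requiring a const reference forces every caller to hold a live scope,
// tying the validity of the returned mode to the scope's lifetime.
static WriteBarrierMode GetWriteBarrierMode(const AssertNoAllocation&) {
  return SKIP_WRITE_BARRIER;  // only valid while no GC can run
}

int main() {
  AssertNoAllocation no_gc;
  WriteBarrierMode mode = GetWriteBarrierMode(no_gc);
  printf("mode = %d\n", mode);
  return 0;
}
```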
@@ -4619,7 +4804,7 @@ static Code* ComputeConstructStub(Handle<SharedFunctionInfo> shared) {
return Code::cast(code);
}
- return Builtins::builtin(Builtins::JSConstructStubGeneric);
+ return shared->construct_stub();
}
@@ -4663,11 +4848,8 @@ static Object* Runtime_NewObject(Arguments args) {
}
// The function should be compiled for the optimization hints to be available.
- if (!function->shared()->is_compiled()) {
- CompileLazyShared(Handle<SharedFunctionInfo>(function->shared()),
- CLEAR_EXCEPTION,
- 0);
- }
+ Handle<SharedFunctionInfo> shared(function->shared());
+ EnsureCompiled(shared, CLEAR_EXCEPTION);
bool first_allocation = !function->has_initial_map();
Handle<JSObject> result = Factory::NewJSObject(function);
@@ -4706,7 +4888,7 @@ static Object* Runtime_LazyCompile(Arguments args) {
// this means that things called through constructors are never known to
// be in loops. We compile them as if they are in loops here just in case.
ASSERT(!function->is_compiled());
- if (!CompileLazyInLoop(function, KEEP_EXCEPTION)) {
+ if (!CompileLazyInLoop(function, Handle<Object>::null(), KEEP_EXCEPTION)) {
return Failure::Exception();
}
@@ -5868,7 +6050,7 @@ static Object* Runtime_MoveArrayContents(Arguments args) {
to->SetContent(FixedArray::cast(from->elements()));
to->set_length(from->length());
from->SetContent(Heap::empty_fixed_array());
- from->set_length(0);
+ from->set_length(Smi::FromInt(0));
return to;
}
@@ -5911,9 +6093,7 @@ static Object* Runtime_GetArrayKeys(Arguments args) {
} else {
Handle<FixedArray> single_interval = Factory::NewFixedArray(2);
// -1 means start of array.
- single_interval->set(0,
- Smi::FromInt(-1),
- SKIP_WRITE_BARRIER);
+ single_interval->set(0, Smi::FromInt(-1));
uint32_t actual_length = static_cast<uint32_t>(array->elements()->length());
uint32_t min_length = actual_length < length ? actual_length : length;
Handle<Object> length_object =
@@ -6001,21 +6181,6 @@ static Object* Runtime_Break(Arguments args) {
}
-// Find the length of the prototype chain that is to to handled as one. If a
-// prototype object is hidden it is to be viewed as part of the the object it
-// is prototype for.
-static int LocalPrototypeChainLength(JSObject* obj) {
- int count = 1;
- Object* proto = obj->GetPrototype();
- while (proto->IsJSObject() &&
- JSObject::cast(proto)->map()->is_hidden_prototype()) {
- count++;
- proto = JSObject::cast(proto)->GetPrototype();
- }
- return count;
-}
-
-
static Object* DebugLookupResultValue(Object* receiver, String* name,
LookupResult* result,
bool* caught_exception) {
@@ -6185,93 +6350,6 @@ static Object* Runtime_DebugGetProperty(Arguments args) {
}
-// Return the names of the local named properties.
-// args[0]: object
-static Object* Runtime_DebugLocalPropertyNames(Arguments args) {
- HandleScope scope;
- ASSERT(args.length() == 1);
- if (!args[0]->IsJSObject()) {
- return Heap::undefined_value();
- }
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- // Skip the global proxy as it has no properties and always delegates to the
- // real global object.
- if (obj->IsJSGlobalProxy()) {
- obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
- }
-
- // Find the number of objects making up this.
- int length = LocalPrototypeChainLength(*obj);
-
- // Find the number of local properties for each of the objects.
- int* local_property_count = NewArray<int>(length);
- int total_property_count = 0;
- Handle<JSObject> jsproto = obj;
- for (int i = 0; i < length; i++) {
- int n;
- n = jsproto->NumberOfLocalProperties(static_cast<PropertyAttributes>(NONE));
- local_property_count[i] = n;
- total_property_count += n;
- if (i < length - 1) {
- jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
- }
- }
-
- // Allocate an array with storage for all the property names.
- Handle<FixedArray> names = Factory::NewFixedArray(total_property_count);
-
- // Get the property names.
- jsproto = obj;
- int proto_with_hidden_properties = 0;
- for (int i = 0; i < length; i++) {
- jsproto->GetLocalPropertyNames(*names,
- i == 0 ? 0 : local_property_count[i - 1]);
- if (!GetHiddenProperties(jsproto, false)->IsUndefined()) {
- proto_with_hidden_properties++;
- }
- if (i < length - 1) {
- jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
- }
- }
-
- // Filter out name of hidden propeties object.
- if (proto_with_hidden_properties > 0) {
- Handle<FixedArray> old_names = names;
- names = Factory::NewFixedArray(
- names->length() - proto_with_hidden_properties);
- int dest_pos = 0;
- for (int i = 0; i < total_property_count; i++) {
- Object* name = old_names->get(i);
- if (name == Heap::hidden_symbol()) {
- continue;
- }
- names->set(dest_pos++, name);
- }
- }
-
- DeleteArray(local_property_count);
- return *Factory::NewJSArrayWithElements(names);
-}
-
-
-// Return the names of the local indexed properties.
-// args[0]: object
-static Object* Runtime_DebugLocalElementNames(Arguments args) {
- HandleScope scope;
- ASSERT(args.length() == 1);
- if (!args[0]->IsJSObject()) {
- return Heap::undefined_value();
- }
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- int n = obj->NumberOfLocalElements(static_cast<PropertyAttributes>(NONE));
- Handle<FixedArray> names = Factory::NewFixedArray(n);
- obj->GetLocalElementKeys(*names, static_cast<PropertyAttributes>(NONE));
- return *Factory::NewJSArrayWithElements(names);
-}
-
-
// Return the property type calculated from the property details.
// args[0]: smi with property details.
static Object* Runtime_DebugPropertyTypeFromDetails(Arguments args) {
@@ -6302,54 +6380,6 @@ static Object* Runtime_DebugPropertyIndexFromDetails(Arguments args) {
}
-// Return information on whether an object has a named or indexed interceptor.
-// args[0]: object
-static Object* Runtime_DebugInterceptorInfo(Arguments args) {
- HandleScope scope;
- ASSERT(args.length() == 1);
- if (!args[0]->IsJSObject()) {
- return Smi::FromInt(0);
- }
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- int result = 0;
- if (obj->HasNamedInterceptor()) result |= 2;
- if (obj->HasIndexedInterceptor()) result |= 1;
-
- return Smi::FromInt(result);
-}
-
-
-// Return property names from named interceptor.
-// args[0]: object
-static Object* Runtime_DebugNamedInterceptorPropertyNames(Arguments args) {
- HandleScope scope;
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- if (obj->HasNamedInterceptor()) {
- v8::Handle<v8::Array> result = GetKeysForNamedInterceptor(obj, obj);
- if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
- }
- return Heap::undefined_value();
-}
-
-
-// Return element names from indexed interceptor.
-// args[0]: object
-static Object* Runtime_DebugIndexedInterceptorElementNames(Arguments args) {
- HandleScope scope;
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- if (obj->HasIndexedInterceptor()) {
- v8::Handle<v8::Array> result = GetKeysForIndexedInterceptor(obj, obj);
- if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
- }
- return Heap::undefined_value();
-}
-
-
// Return property value from named interceptor.
// args[0]: object
// args[1]: property name
@@ -7198,9 +7228,8 @@ Object* Runtime::FindSharedFunctionInfoInScript(Handle<Script> script,
Handle<SharedFunctionInfo> last;
while (!done) {
HeapIterator iterator;
- while (iterator.has_next()) {
- HeapObject* obj = iterator.next();
- ASSERT(obj != NULL);
+ for (HeapObject* obj = iterator.next();
+ obj != NULL; obj = iterator.next()) {
if (obj->IsSharedFunctionInfo()) {
Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
if (shared->script() == *script) {
@@ -7265,7 +7294,7 @@ Object* Runtime::FindSharedFunctionInfoInScript(Handle<Script> script,
if (!done) {
// If the candidate is not compiled compile it to reveal any inner
// functions which might contain the requested source position.
- CompileLazyShared(target, KEEP_EXCEPTION, 0);
+ CompileLazyShared(target, KEEP_EXCEPTION);
}
}
@@ -7437,7 +7466,9 @@ static Handle<Object> GetArgumentsObject(JavaScriptFrame* frame,
const int length = frame->GetProvidedParametersCount();
Handle<JSObject> arguments = Factory::NewArgumentsObject(function, length);
Handle<FixedArray> array = Factory::NewFixedArray(length);
- WriteBarrierMode mode = array->GetWriteBarrierMode();
+
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
for (int i = 0; i < length; i++) {
array->set(i, frame->GetParameter(i), mode);
}
@@ -7656,10 +7687,10 @@ static int DebugReferencedBy(JSObject* target,
int count = 0;
JSObject* last = NULL;
HeapIterator iterator;
- while (iterator.has_next() &&
+ HeapObject* heap_obj = NULL;
+ while (((heap_obj = iterator.next()) != NULL) &&
(max_references == 0 || count < max_references)) {
// Only look at all JSObjects.
- HeapObject* heap_obj = iterator.next();
if (heap_obj->IsJSObject()) {
// Skip context extension objects and argument arrays as these are
// checked in the context of functions using them.
@@ -7769,10 +7800,10 @@ static int DebugConstructedBy(JSFunction* constructor, int max_references,
// Iterate the heap.
int count = 0;
HeapIterator iterator;
- while (iterator.has_next() &&
+ HeapObject* heap_obj = NULL;
+ while (((heap_obj = iterator.next()) != NULL) &&
(max_references == 0 || count < max_references)) {
// Only look at all JSObjects.
- HeapObject* heap_obj = iterator.next();
if (heap_obj->IsJSObject()) {
JSObject* obj = JSObject::cast(heap_obj);
if (obj->map()->constructor() == constructor) {
@@ -7851,7 +7882,8 @@ static Object* Runtime_DebugDisassembleFunction(Arguments args) {
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_CHECKED(JSFunction, func, 0);
- if (!func->is_compiled() && !CompileLazy(func, KEEP_EXCEPTION)) {
+ Handle<SharedFunctionInfo> shared(func->shared());
+ if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
return Failure::Exception();
}
func->code()->PrintLn();
@@ -7866,10 +7898,11 @@ static Object* Runtime_DebugDisassembleConstructor(Arguments args) {
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_CHECKED(JSFunction, func, 0);
- if (!func->is_compiled() && !CompileLazy(func, KEEP_EXCEPTION)) {
+ Handle<SharedFunctionInfo> shared(func->shared());
+ if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
return Failure::Exception();
}
- func->shared()->construct_stub()->PrintLn();
+ shared->construct_stub()->PrintLn();
#endif // DEBUG
return Heap::undefined_value();
}
@@ -7920,8 +7953,8 @@ static Handle<Object> Runtime_GetScriptFromScriptName(
// script data.
Handle<Script> script;
HeapIterator iterator;
- while (script.is_null() && iterator.has_next()) {
- HeapObject* obj = iterator.next();
+ HeapObject* obj = NULL;
+ while (script.is_null() && ((obj = iterator.next()) != NULL)) {
// If a script is found check if it has the script data requested.
if (obj->IsScript()) {
if (Script::cast(obj)->name()->IsString()) {
@@ -8019,7 +8052,7 @@ static Object* Runtime_CollectStackTrace(Arguments args) {
if (cursor + 2 < elements->length()) {
elements->set(cursor++, recv);
elements->set(cursor++, fun);
- elements->set(cursor++, offset, SKIP_WRITE_BARRIER);
+ elements->set(cursor++, offset);
} else {
HandleScope scope;
Handle<Object> recv_handle(recv);
@@ -8032,8 +8065,7 @@ static Object* Runtime_CollectStackTrace(Arguments args) {
iter.Advance();
}
- result->set_length(Smi::FromInt(cursor), SKIP_WRITE_BARRIER);
-
+ result->set_length(Smi::FromInt(cursor));
return *result;
}
@@ -8114,12 +8146,12 @@ static Object* Runtime_IS_VAR(Arguments args) {
// Implementation of Runtime
#define F(name, nargs, ressize) \
- { #name, "RuntimeStub_" #name, FUNCTION_ADDR(Runtime_##name), nargs, \
+ { #name, FUNCTION_ADDR(Runtime_##name), nargs, \
static_cast<int>(Runtime::k##name), ressize },
static Runtime::Function Runtime_functions[] = {
RUNTIME_FUNCTION_LIST(F)
- { NULL, NULL, NULL, 0, -1, 0 }
+ { NULL, NULL, 0, -1, 0 }
};
#undef F
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index b6542a613..b2b8609e7 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -52,6 +52,11 @@ namespace internal {
F(IsPropertyEnumerable, 2, 1) \
F(GetPropertyNames, 1, 1) \
F(GetPropertyNamesFast, 1, 1) \
+ F(GetLocalPropertyNames, 1, 1) \
+ F(GetLocalElementNames, 1, 1) \
+ F(GetInterceptorInfo, 1, 1) \
+ F(GetNamedInterceptorPropertyNames, 1, 1) \
+ F(GetIndexedInterceptorElementNames, 1, 1) \
F(GetArgumentsProperty, 1, 1) \
F(ToFastProperties, 1, 1) \
F(ToSlowProperties, 1, 1) \
@@ -63,6 +68,8 @@ namespace internal {
\
F(GetOwnProperty, 2, 1) \
\
+ F(IsExtensible, 1, 1) \
+ \
/* Utilities */ \
F(GetCalledFunction, 0, 1) \
F(GetFunctionDelegate, 1, 1) \
@@ -285,14 +292,9 @@ namespace internal {
F(Break, 0, 1) \
F(DebugGetPropertyDetails, 2, 1) \
F(DebugGetProperty, 2, 1) \
- F(DebugLocalPropertyNames, 1, 1) \
- F(DebugLocalElementNames, 1, 1) \
F(DebugPropertyTypeFromDetails, 1, 1) \
F(DebugPropertyAttributesFromDetails, 1, 1) \
F(DebugPropertyIndexFromDetails, 1, 1) \
- F(DebugInterceptorInfo, 1, 1) \
- F(DebugNamedInterceptorPropertyNames, 1, 1) \
- F(DebugIndexedInterceptorElementNames, 1, 1) \
F(DebugNamedInterceptorPropertyValue, 2, 1) \
F(DebugIndexedInterceptorElementValue, 2, 1) \
F(CheckExecutionState, 1, 1) \
@@ -371,9 +373,6 @@ class Runtime : public AllStatic {
// The JS name of the function.
const char* name;
- // The name of the stub that calls the runtime function.
- const char* stub_name;
-
// The C++ (native) entry point.
byte* entry;
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index ce2f197f9..c4c855eb9 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -541,7 +541,9 @@ function ToObject(x) {
if (IS_STRING(x)) return new $String(x);
if (IS_NUMBER(x)) return new $Number(x);
if (IS_BOOLEAN(x)) return new $Boolean(x);
- if (x == null) throw %MakeTypeError('null_to_object', []);
+ if (IS_NULL_OR_UNDEFINED(x) && !IS_UNDETECTABLE(x)) {
+ throw %MakeTypeError('null_to_object', []);
+ }
return x;
}
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index 0d3b789f9..cf7e49f85 100644..100755
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -323,11 +323,14 @@ void KeywordMatcher::Step(uc32 input) {
// ----------------------------------------------------------------------------
// Scanner
-Scanner::Scanner(bool pre) : stack_overflow_(false), is_pre_parsing_(pre) { }
+Scanner::Scanner(ParserMode pre)
+ : stack_overflow_(false), is_pre_parsing_(pre == PREPARSE) { }
-void Scanner::Init(Handle<String> source, unibrow::CharacterStream* stream,
- int position) {
+void Scanner::Init(Handle<String> source,
+ unibrow::CharacterStream* stream,
+ int position,
+ ParserLanguage language) {
// Initialize the source buffer.
if (!source.is_null() && StringShape(*source).IsExternalTwoByte()) {
two_byte_string_buffer_.Initialize(
@@ -339,6 +342,7 @@ void Scanner::Init(Handle<String> source, unibrow::CharacterStream* stream,
}
position_ = position;
+ is_parsing_json_ = (language == JSON);
// Set c0_ (one character ahead)
ASSERT(kCharacterLookaheadBufferSize == 1);
@@ -416,7 +420,17 @@ static inline bool IsByteOrderMark(uc32 c) {
}
-bool Scanner::SkipWhiteSpace() {
+bool Scanner::SkipJsonWhiteSpace() {
+ int start_position = source_pos();
+  // JSON whitespace is tab, carriage-return, newline and space.
+ while (c0_ == ' ' || c0_ == '\n' || c0_ == '\r' || c0_ == '\t') {
+ Advance();
+ }
+ return source_pos() != start_position;
+}
+
+
+bool Scanner::SkipJavaScriptWhiteSpace() {
int start_position = source_pos();
while (true) {
@@ -512,7 +526,194 @@ Token::Value Scanner::ScanHtmlComment() {
}
-void Scanner::Scan() {
+
+void Scanner::ScanJson() {
+ next_.literal_buffer = NULL;
+ Token::Value token;
+ has_line_terminator_before_next_ = false;
+ do {
+ // Remember the position of the next token
+ next_.location.beg_pos = source_pos();
+ switch (c0_) {
+ case '\t':
+ case '\r':
+ case '\n':
+ case ' ':
+ Advance();
+ token = Token::WHITESPACE;
+ break;
+ case '{':
+ Advance();
+ token = Token::LBRACE;
+ break;
+ case '}':
+ Advance();
+ token = Token::RBRACE;
+ break;
+ case '[':
+ Advance();
+ token = Token::LBRACK;
+ break;
+ case ']':
+ Advance();
+ token = Token::RBRACK;
+ break;
+ case ':':
+ Advance();
+ token = Token::COLON;
+ break;
+ case ',':
+ Advance();
+ token = Token::COMMA;
+ break;
+ case '"':
+ token = ScanJsonString();
+ break;
+ case '-':
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ token = ScanJsonNumber();
+ break;
+ case 't':
+ token = ScanJsonIdentifier("true", Token::TRUE_LITERAL);
+ break;
+ case 'f':
+ token = ScanJsonIdentifier("false", Token::FALSE_LITERAL);
+ break;
+ case 'n':
+ token = ScanJsonIdentifier("null", Token::NULL_LITERAL);
+ break;
+ default:
+ if (c0_ < 0) {
+ Advance();
+ token = Token::EOS;
+ } else {
+ Advance();
+ token = Select(Token::ILLEGAL);
+ }
+ }
+ } while (token == Token::WHITESPACE);
+
+ next_.location.end_pos = source_pos();
+ next_.token = token;
+}
+
+
+Token::Value Scanner::ScanJsonString() {
+ ASSERT_EQ('"', c0_);
+ Advance();
+ StartLiteral();
+ while (c0_ != '"' && c0_ > 0) {
+ // Check for control character (0x00-0x1f) or unterminated string (<0).
+ if (c0_ < 0x20) return Token::ILLEGAL;
+ if (c0_ != '\\') {
+ AddCharAdvance();
+ } else {
+ Advance();
+ switch (c0_) {
+ case '"':
+ case '\\':
+ case '/':
+ AddChar(c0_);
+ break;
+ case 'b':
+ AddChar('\x08');
+ break;
+ case 'f':
+ AddChar('\x0c');
+ break;
+ case 'n':
+ AddChar('\x0a');
+ break;
+ case 'r':
+ AddChar('\x0d');
+ break;
+ case 't':
+ AddChar('\x09');
+ break;
+ case 'u': {
+ uc32 value = 0;
+ for (int i = 0; i < 4; i++) {
+ Advance();
+ int digit = HexValue(c0_);
+ if (digit < 0) return Token::ILLEGAL;
+ value = value * 16 + digit;
+ }
+ AddChar(value);
+ break;
+ }
+ default:
+ return Token::ILLEGAL;
+ }
+ Advance();
+ }
+ }
+ if (c0_ != '"') {
+ return Token::ILLEGAL;
+ }
+ TerminateLiteral();
+ Advance();
+ return Token::STRING;
+}
+
+
+Token::Value Scanner::ScanJsonNumber() {
+ StartLiteral();
+ if (c0_ == '-') AddCharAdvance();
+ if (c0_ == '0') {
+ AddCharAdvance();
+ // Prefix zero is only allowed if it's the only digit before
+ // a decimal point or exponent.
+ if ('0' <= c0_ && c0_ <= '9') return Token::ILLEGAL;
+ } else {
+ if (c0_ < '1' || c0_ > '9') return Token::ILLEGAL;
+ do {
+ AddCharAdvance();
+ } while (c0_ >= '0' && c0_ <= '9');
+ }
+ if (c0_ == '.') {
+ AddCharAdvance();
+ if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
+ do {
+ AddCharAdvance();
+ } while (c0_ >= '0' && c0_ <= '9');
+ }
+ if ((c0_ | 0x20) == 'e') {
+ AddCharAdvance();
+ if (c0_ == '-' || c0_ == '+') AddCharAdvance();
+ if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
+ do {
+ AddCharAdvance();
+ } while (c0_ >= '0' && c0_ <= '9');
+ }
+ TerminateLiteral();
+ return Token::NUMBER;
+}
+
+
+Token::Value Scanner::ScanJsonIdentifier(const char* text,
+ Token::Value token) {
+ StartLiteral();
+ while (*text != '\0') {
+ if (c0_ != *text) return Token::ILLEGAL;
+ Advance();
+ text++;
+ }
+ if (kIsIdentifierPart.get(c0_)) return Token::ILLEGAL;
+ TerminateLiteral();
+ return token;
+}
+
+
+void Scanner::ScanJavaScript() {
next_.literal_buffer = NULL;
Token::Value token;
has_line_terminator_before_next_ = false;
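
ScanJsonNumber above accepts exactly the JSONNumber production: an optional minus sign, no redundant leading zero, at least one digit on each side of a decimal point, and an optional signed exponent. The same acceptance logic over a plain C string, as a standalone validator (a sketch mirroring the scanner's checks, not V8 code):

```cpp
#include <stdio.h>

static bool IsJsonNumber(const char* s) {
  if (*s == '-') s++;
  if (*s == '0') {
    s++;                                   // a lone zero: no digit may follow
    if (*s >= '0' && *s <= '9') return false;
  } else {
    if (*s < '1' || *s > '9') return false;
    while (*s >= '0' && *s <= '9') s++;
  }
  if (*s == '.') {
    s++;
    if (*s < '0' || *s > '9') return false;  // digit required after '.'
    while (*s >= '0' && *s <= '9') s++;
  }
  if ((*s | 0x20) == 'e') {                  // matches 'e' or 'E'
    s++;
    if (*s == '-' || *s == '+') s++;
    if (*s < '0' || *s > '9') return false;  // digit required in exponent
    while (*s >= '0' && *s <= '9') s++;
  }
  return *s == '\0';                         // the whole string must match
}

int main() {
  printf("%d %d %d %d\n", IsJsonNumber("-1.5e-10"), IsJsonNumber("0.25"),
         IsJsonNumber("01"), IsJsonNumber("1."));  // prints: 1 1 0 0
  return 0;
}
```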
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index 9d7b34e7c..f0035c0eb 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -252,18 +252,22 @@ class KeywordMatcher {
};
+enum ParserMode { PARSE, PREPARSE };
+enum ParserLanguage { JAVASCRIPT, JSON };
+
+
class Scanner {
public:
-
typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
// Construction
- explicit Scanner(bool is_pre_parsing);
+ explicit Scanner(ParserMode parse_mode);
// Initialize the Scanner to scan source:
void Init(Handle<String> source,
unibrow::CharacterStream* stream,
- int position);
+ int position,
+ ParserLanguage language);
// Returns the next token.
Token::Value Next();
@@ -377,6 +381,7 @@ class Scanner {
TokenDesc next_; // desc for next token (one token look-ahead)
bool has_line_terminator_before_next_;
bool is_pre_parsing_;
+ bool is_parsing_json_;
// Literal buffer support
void StartLiteral();
@@ -391,14 +396,57 @@ class Scanner {
c0_ = ch;
}
- bool SkipWhiteSpace();
+ bool SkipWhiteSpace() {
+ if (is_parsing_json_) {
+ return SkipJsonWhiteSpace();
+ } else {
+ return SkipJavaScriptWhiteSpace();
+ }
+ }
+ bool SkipJavaScriptWhiteSpace();
+ bool SkipJsonWhiteSpace();
Token::Value SkipSingleLineComment();
Token::Value SkipMultiLineComment();
inline Token::Value Select(Token::Value tok);
inline Token::Value Select(uc32 next, Token::Value then, Token::Value else_);
- void Scan();
+ inline void Scan() {
+ if (is_parsing_json_) {
+ ScanJson();
+ } else {
+ ScanJavaScript();
+ }
+ }
+
+ // Scans a single JavaScript token.
+ void ScanJavaScript();
+
+ // Scan a single JSON token. The JSON lexical grammar is specified in the
+ // ECMAScript 5 standard, section 15.12.1.1.
+ // Recognizes all of the single-character tokens directly, or calls a function
+ // to scan a number, string or identifier literal.
+ // The only allowed whitespace characters between tokens are tab,
+  // carriage-return, newline and space.
+ void ScanJson();
+
+ // A JSON number (production JSONNumber) is a subset of the valid JavaScript
+ // decimal number literals.
+ // It includes an optional minus sign, must have at least one
+ // digit before and after a decimal point, may not have prefixed zeros (unless
+ // the integer part is zero), and may include an exponent part (e.g., "e-10").
+ // Hexadecimal and octal numbers are not allowed.
+ Token::Value ScanJsonNumber();
+  // A JSON string (production JSONString) is a subset of valid JavaScript string
+ // literals. The string must only be double-quoted (not single-quoted), and
+ // the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
+ // four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
+ Token::Value ScanJsonString();
+  // Used to recognize one of the literals "true", "false", or "null". These
+ // are the only valid JSON identifiers (productions JSONBooleanLiteral,
+ // JSONNullLiteral).
+ Token::Value ScanJsonIdentifier(const char* text, Token::Value token);
+
void ScanDecimalDigits();
Token::Value ScanNumber(bool seen_period);
Token::Value ScanIdentifier();
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index db46f3acf..bc934fb5a 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -44,67 +44,6 @@
namespace v8 {
namespace internal {
-// Mapping objects to their location after deserialization.
-// This is used during building, but not at runtime by V8.
-class SerializationAddressMapper {
- public:
- static bool IsMapped(HeapObject* obj) {
- EnsureMapExists();
- return serialization_map_->Lookup(Key(obj), Hash(obj), false) != NULL;
- }
-
- static int MappedTo(HeapObject* obj) {
- ASSERT(IsMapped(obj));
- return static_cast<int>(reinterpret_cast<intptr_t>(
- serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
- }
-
- static void Map(HeapObject* obj, int to) {
- EnsureMapExists();
- ASSERT(!IsMapped(obj));
- HashMap::Entry* entry =
- serialization_map_->Lookup(Key(obj), Hash(obj), true);
- entry->value = Value(to);
- }
-
- static void Zap() {
- if (serialization_map_ != NULL) {
- delete serialization_map_;
- }
- serialization_map_ = NULL;
- }
-
- private:
- static bool SerializationMatchFun(void* key1, void* key2) {
- return key1 == key2;
- }
-
- static uint32_t Hash(HeapObject* obj) {
- return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
- }
-
- static void* Key(HeapObject* obj) {
- return reinterpret_cast<void*>(obj->address());
- }
-
- static void* Value(int v) {
- return reinterpret_cast<void*>(v);
- }
-
- static void EnsureMapExists() {
- if (serialization_map_ == NULL) {
- serialization_map_ = new HashMap(&SerializationMatchFun);
- }
- }
-
- static HashMap* serialization_map_;
-};
-
-
-HashMap* SerializationAddressMapper::serialization_map_ = NULL;
-
-
-
// -----------------------------------------------------------------------------
// Coding of external references.
@@ -241,7 +180,7 @@ void ExternalReferenceTable::PopulateTable() {
static const RefTableEntry ref_table[] = {
// Builtins
-#define DEF_ENTRY_C(name) \
+#define DEF_ENTRY_C(name, ignored) \
{ C_BUILTIN, \
Builtins::c_##name, \
"Builtins::" #name },
@@ -249,11 +188,11 @@ void ExternalReferenceTable::PopulateTable() {
BUILTIN_LIST_C(DEF_ENTRY_C)
#undef DEF_ENTRY_C
-#define DEF_ENTRY_C(name) \
+#define DEF_ENTRY_C(name, ignored) \
{ BUILTIN, \
Builtins::name, \
"Builtins::" #name },
-#define DEF_ENTRY_A(name, kind, state) DEF_ENTRY_C(name)
+#define DEF_ENTRY_A(name, kind, state) DEF_ENTRY_C(name, ignored)
BUILTIN_LIST_C(DEF_ENTRY_C)
BUILTIN_LIST_A(DEF_ENTRY_A)
@@ -396,10 +335,6 @@ void ExternalReferenceTable::PopulateTable() {
"V8::RandomPositiveSmi");
// Miscellaneous
- Add(ExternalReference::builtin_passed_function().address(),
- UNCLASSIFIED,
- 1,
- "Builtins::builtin_passed_function");
Add(ExternalReference::the_hole_value_location().address(),
UNCLASSIFIED,
2,
@@ -483,15 +418,19 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
21,
"NativeRegExpMacroAssembler::GrowStack()");
+ Add(ExternalReference::re_word_character_map().address(),
+ UNCLASSIFIED,
+ 22,
+ "NativeRegExpMacroAssembler::word_character_map");
#endif
// Keyed lookup cache.
Add(ExternalReference::keyed_lookup_cache_keys().address(),
UNCLASSIFIED,
- 22,
+ 23,
"KeyedLookupCache::keys()");
Add(ExternalReference::keyed_lookup_cache_field_offsets().address(),
UNCLASSIFIED,
- 23,
+ 24,
"KeyedLookupCache::field_offsets()");
}
@@ -558,11 +497,10 @@ ExternalReferenceDecoder::~ExternalReferenceDecoder() {
bool Serializer::serialization_enabled_ = false;
bool Serializer::too_late_to_enable_now_ = false;
+ExternalReferenceDecoder* Deserializer::external_reference_decoder_ = NULL;
-Deserializer::Deserializer(SnapshotByteSource* source)
- : source_(source),
- external_reference_decoder_(NULL) {
+Deserializer::Deserializer(SnapshotByteSource* source) : source_(source) {
}
@@ -648,12 +586,34 @@ void Deserializer::Deserialize() {
ASSERT_EQ(NULL, ThreadState::FirstInUse());
// No active handles.
ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
+ // Make sure the entire partial snapshot cache is traversed, filling it with
+ // valid object pointers.
+ partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
ASSERT_EQ(NULL, external_reference_decoder_);
external_reference_decoder_ = new ExternalReferenceDecoder();
- Heap::IterateRoots(this, VISIT_ONLY_STRONG);
+ Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
+ Heap::IterateWeakRoots(this, VISIT_ALL);
+}
+
+
+void Deserializer::DeserializePartial(Object** root) {
+ // Don't GC while deserializing - just expand the heap.
+ AlwaysAllocateScope always_allocate;
+ // Don't use the free lists while deserializing.
+ LinearAllocationScope allocate_linearly;
+ if (external_reference_decoder_ == NULL) {
+ external_reference_decoder_ = new ExternalReferenceDecoder();
+ }
+ VisitPointer(root);
+}
+
+
+Deserializer::~Deserializer() {
ASSERT(source_->AtEOF());
- delete external_reference_decoder_;
- external_reference_decoder_ = NULL;
+ if (external_reference_decoder_ != NULL) {
+ delete external_reference_decoder_;
+ external_reference_decoder_ = NULL;
+ }
}
@@ -680,6 +640,9 @@ void Deserializer::ReadObject(int space_number,
*write_back = HeapObject::FromAddress(address);
Object** current = reinterpret_cast<Object**>(address);
Object** limit = current + (size >> kPointerSizeLog2);
+ if (FLAG_log_snapshot_positions) {
+ LOG(SnapshotPositionEvent(address, source_->position()));
+ }
ReadChunk(current, limit, space_number, address);
}
@@ -739,7 +702,6 @@ void Deserializer::ReadChunk(Object** current,
break;
case OBJECT_SERIALIZATION + CODE_SPACE:
ReadObject(CODE_SPACE, Heap::code_space(), current++);
- LOG(LogCodeObject(current[-1]));
break;
case OBJECT_SERIALIZATION + CELL_SPACE:
ReadObject(CELL_SPACE, Heap::cell_space(), current++);
@@ -749,7 +711,6 @@ void Deserializer::ReadChunk(Object** current,
break;
case OBJECT_SERIALIZATION + kLargeCode:
ReadObject(kLargeCode, Heap::lo_space(), current++);
- LOG(LogCodeObject(current[-1]));
break;
case OBJECT_SERIALIZATION + kLargeFixedArray:
ReadObject(kLargeFixedArray, Heap::lo_space(), current++);
@@ -758,7 +719,6 @@ void Deserializer::ReadChunk(Object** current,
Object* new_code_object = NULL;
ReadObject(kLargeCode, Heap::lo_space(), &new_code_object);
Code* code_object = reinterpret_cast<Code*>(new_code_object);
- LOG(LogCodeObject(code_object));
// Setting a branch/call to another code object from code.
Address location_of_branch_data = reinterpret_cast<Address>(current);
Assembler::set_target_at(location_of_branch_data,
@@ -771,7 +731,6 @@ void Deserializer::ReadChunk(Object** current,
Object* new_code_object = NULL;
ReadObject(CODE_SPACE, Heap::code_space(), &new_code_object);
Code* code_object = reinterpret_cast<Code*>(new_code_object);
- LOG(LogCodeObject(code_object));
// Setting a branch/call to another code object from code.
Address location_of_branch_data = reinterpret_cast<Address>(current);
Assembler::set_target_at(location_of_branch_data,
@@ -866,6 +825,21 @@ void Deserializer::ReadChunk(Object** current,
*current++ = reinterpret_cast<Object*>(resource);
break;
}
+ case ROOT_SERIALIZATION: {
+ int root_id = source_->GetInt();
+ *current++ = Heap::roots_address()[root_id];
+ break;
+ }
+ case PARTIAL_SNAPSHOT_CACHE_ENTRY: {
+ int cache_index = source_->GetInt();
+ *current++ = partial_snapshot_cache_[cache_index];
+ break;
+ }
+ case SYNCHRONIZE: {
+        // Getting here indicates a mismatch between the number of GC roots
+        // when serializing and deserializing.
+ UNREACHABLE();
+ }
default:
UNREACHABLE();
}
@@ -919,14 +893,14 @@ Serializer::Serializer(SnapshotByteSink* sink)
: sink_(sink),
current_root_index_(0),
external_reference_encoder_(NULL),
- partial_(false) {
+ large_object_total_(0) {
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
}
-void Serializer::Serialize() {
+void StartupSerializer::SerializeStrongReferences() {
// No active threads.
CHECK_EQ(NULL, ThreadState::FirstInUse());
// No active or weak handles.
@@ -940,20 +914,30 @@ void Serializer::Serialize() {
CHECK_NE(v8::INSTALLED, ext->state());
}
external_reference_encoder_ = new ExternalReferenceEncoder();
- Heap::IterateRoots(this, VISIT_ONLY_STRONG);
+ Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
delete external_reference_encoder_;
external_reference_encoder_ = NULL;
- SerializationAddressMapper::Zap();
}
-void Serializer::SerializePartial(Object** object) {
- partial_ = true;
+void PartialSerializer::Serialize(Object** object) {
external_reference_encoder_ = new ExternalReferenceEncoder();
this->VisitPointer(object);
+
+ // After we have done the partial serialization the partial snapshot cache
+ // will contain some references needed to decode the partial snapshot. We
+  // fill it up with undefineds so that it has a predictable length and the
+  // deserialization code doesn't need to know it.
+ for (int index = partial_snapshot_cache_length_;
+ index < kPartialSnapshotCacheCapacity;
+ index++) {
+ partial_snapshot_cache_[index] = Heap::undefined_value();
+ startup_serializer_->VisitPointer(&partial_snapshot_cache_[index]);
+ }
+ partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
+
delete external_reference_encoder_;
external_reference_encoder_ = NULL;
- SerializationAddressMapper::Zap();
}
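
The padding loop above is what makes the length predictable: after a partial serialization the cache always holds exactly kPartialSnapshotCacheCapacity entries, with the unused tail set to undefined. The same trick in isolation (capacity and sentinel are stand-ins):

    #include <array>
    #include <cstdio>

    constexpr int kCapacity = 8;            // Stand-in for kPartialSnapshotCacheCapacity.
    const char* const kSentinel = "undef";  // Stand-in for Heap::undefined_value().

    int main() {
      std::array<const char*, kCapacity> cache{};
      int length = 0;
      cache[length++] = "string:foo";  // Entries added during serialization.
      cache[length++] = "sfi:bar";
      // Pad to full capacity so the reader never needs to know `length`.
      for (int i = length; i < kCapacity; i++) cache[i] = kSentinel;
      for (const char* entry : cache) std::printf("%s\n", entry);
    }
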
@@ -972,7 +956,54 @@ void Serializer::VisitPointers(Object** start, Object** end) {
}
-int Serializer::RootIndex(HeapObject* heap_object) {
+Object* SerializerDeserializer::partial_snapshot_cache_[
+ kPartialSnapshotCacheCapacity];
+int SerializerDeserializer::partial_snapshot_cache_length_ = 0;
+
+
+// This ensures that the partial snapshot cache keeps things alive during GC and
+// tracks their movement. When it is called during serialization of the startup
+// snapshot the partial snapshot is empty, so nothing happens. When the partial
+// (context) snapshot is created, this array is populated with the pointers that
+// the partial snapshot will need. As that happens we emit serialized objects to
+// the startup snapshot that correspond to the elements of this cache array. On
+// deserialization we therefore need to visit the cache array. This fills it up
+// with pointers to deserialized objects.
+void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
+ visitor->VisitPointers(
+ &partial_snapshot_cache_[0],
+ &partial_snapshot_cache_[partial_snapshot_cache_length_]);
+}
+
+
+// When deserializing we need to set the size of the snapshot cache so that
+// the root iteration code (above) iterates over the array elements and writes
+// references to the deserialized objects into them.
+void SerializerDeserializer::SetSnapshotCacheSize(int size) {
+ partial_snapshot_cache_length_ = size;
+}
+
+
+int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
+ for (int i = 0; i < partial_snapshot_cache_length_; i++) {
+ Object* entry = partial_snapshot_cache_[i];
+ if (entry == heap_object) return i;
+ }
+ // The object was not found in the cache, so we add it to the cache and
+ // then visit the pointer so that it becomes part of the startup snapshot
+ // and can be referred to from the partial snapshot.
+ int length = partial_snapshot_cache_length_;
+ CHECK(length < kPartialSnapshotCacheCapacity);
+ partial_snapshot_cache_[length] = heap_object;
+ startup_serializer_->VisitPointer(&partial_snapshot_cache_[length]);
+ // We don't recurse from the startup snapshot generator into the partial
+ // snapshot generator.
+ ASSERT(length == partial_snapshot_cache_length_);
+ return partial_snapshot_cache_length_++;
+}
+
+
+int PartialSerializer::RootIndex(HeapObject* heap_object) {
for (int i = 0; i < Heap::kRootListLength; i++) {
Object* root = Heap::roots_address()[i];
if (root == heap_object) return i;
@@ -981,67 +1012,136 @@ int Serializer::RootIndex(HeapObject* heap_object) {
}
-void Serializer::SerializeObject(
- Object* o,
+// Encode the location of an already deserialized object in order to write its
+// location into a later object. We can encode the location as an offset from
+// the start of the deserialized objects or as an offset backwards from the
+// current allocation pointer.
+void Serializer::SerializeReferenceToPreviousObject(
+ int space,
+ int address,
ReferenceRepresentation reference_representation) {
- CHECK(o->IsHeapObject());
- HeapObject* heap_object = HeapObject::cast(o);
- if (partial_) {
- int root_index = RootIndex(heap_object);
- if (root_index != kInvalidRootIndex) {
- sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
- sink_->PutInt(root_index, "root_index");
- return;
+ int offset = CurrentAllocationAddress(space) - address;
+ bool from_start = true;
+ if (SpaceIsPaged(space)) {
+ // For paged space it is simple to encode back from current allocation if
+ // the object is on the same page as the current allocation pointer.
+ if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
+ (address >> kPageSizeBits)) {
+ from_start = false;
+ address = offset;
}
- // All the symbols that the snapshot needs should be in the root table.
- ASSERT(!heap_object->IsSymbol());
- }
- if (SerializationAddressMapper::IsMapped(heap_object)) {
- int space = SpaceOfAlreadySerializedObject(heap_object);
- int address = SerializationAddressMapper::MappedTo(heap_object);
- int offset = CurrentAllocationAddress(space) - address;
- bool from_start = true;
- if (SpaceIsPaged(space)) {
- if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
- (address >> kPageSizeBits)) {
- from_start = false;
- address = offset;
- }
- } else if (space == NEW_SPACE) {
- if (offset < address) {
- from_start = false;
- address = offset;
- }
+ } else if (space == NEW_SPACE) {
+ // For new space it is always simple to encode back from current allocation.
+ if (offset < address) {
+ from_start = false;
+ address = offset;
}
- // If we are actually dealing with real offsets (and not a numbering of
- // all objects) then we should shift out the bits that are always 0.
- if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
- if (reference_representation == CODE_TARGET_REPRESENTATION) {
- if (from_start) {
- sink_->Put(CODE_REFERENCE_SERIALIZATION + space, "RefCodeSer");
- sink_->PutInt(address, "address");
- } else {
- sink_->Put(CODE_BACKREF_SERIALIZATION + space, "BackRefCodeSer");
- sink_->PutInt(address, "address");
- }
+ }
+ // If we are actually dealing with real offsets (and not a numbering of
+ // all objects) then we should shift out the bits that are always 0.
+ if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
+ // On some architectures references between code objects are encoded
+ // specially (as relative offsets). Such references have their own
+ // special tags to simplify the deserializer.
+ if (reference_representation == CODE_TARGET_REPRESENTATION) {
+ if (from_start) {
+ sink_->Put(CODE_REFERENCE_SERIALIZATION + space, "RefCodeSer");
+ sink_->PutInt(address, "address");
} else {
- CHECK_EQ(TAGGED_REPRESENTATION, reference_representation);
- if (from_start) {
-#define COMMON_REFS_CASE(tag, common_space, common_offset) \
- if (space == common_space && address == common_offset) { \
- sink_->PutSection(tag + REFERENCE_SERIALIZATION, "RefSer"); \
- } else /* NOLINT */
- COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
+ sink_->Put(CODE_BACKREF_SERIALIZATION + space, "BackRefCodeSer");
+ sink_->PutInt(address, "address");
+ }
+ } else {
+ // Regular absolute references.
+ CHECK_EQ(TAGGED_REPRESENTATION, reference_representation);
+ if (from_start) {
+ // There are some common offsets that have their own specialized encoding.
+#define COMMON_REFS_CASE(tag, common_space, common_offset) \
+ if (space == common_space && address == common_offset) { \
+ sink_->PutSection(tag + REFERENCE_SERIALIZATION, "RefSer"); \
+ } else /* NOLINT */
+ COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
#undef COMMON_REFS_CASE
- { /* NOLINT */
- sink_->Put(REFERENCE_SERIALIZATION + space, "RefSer");
- sink_->PutInt(address, "address");
- }
- } else {
- sink_->Put(BACKREF_SERIALIZATION + space, "BackRefSer");
+ { /* NOLINT */
+ sink_->Put(REFERENCE_SERIALIZATION + space, "RefSer");
sink_->PutInt(address, "address");
}
+ } else {
+ sink_->Put(BACKREF_SERIALIZATION + space, "BackRefSer");
+ sink_->PutInt(address, "address");
}
+ }
+}
+
+
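
SerializeReferenceToPreviousObject picks between two encodings: an offset from the start of the space, or a back-reference measured from the current allocation point, which is preferred when the target sits on the same page (paged spaces) or when the backwards offset is the shorter one (new space). Aligned offsets then drop their always-zero low bits. A pure-function sketch of the paged-space branch (the two constants are stand-ins for the build's real values):

    #include <cassert>
    #include <cstdio>

    constexpr int kPageSizeBits = 13;        // 8K pages (stand-in).
    constexpr int kObjectAlignmentBits = 2;  // Pointer-aligned objects (stand-in).

    struct Ref {
      bool from_start;  // Encode from the space start or back from the top?
      int value;        // Offset, with the always-zero alignment bits dropped.
    };

    // Mirrors the paged-space branch of SerializeReferenceToPreviousObject.
    Ref EncodePagedRef(int allocation_top, int address) {
      assert(address <= allocation_top);
      Ref ref{true, address};
      if ((allocation_top >> kPageSizeBits) == (address >> kPageSizeBits)) {
        // Same page as the allocation point: a short back-reference works.
        ref.from_start = false;
        ref.value = allocation_top - address;
      }
      ref.value >>= kObjectAlignmentBits;
      return ref;
    }

    int main() {
      Ref same_page = EncodePagedRef(0x2040, 0x2010);   // backref, value 12
      Ref other_page = EncodePagedRef(0x2040, 0x0100);  // from start, value 64
      std::printf("backref=%d value=%d\n", !same_page.from_start, same_page.value);
      std::printf("backref=%d value=%d\n", !other_page.from_start, other_page.value);
    }
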
+void StartupSerializer::SerializeObject(
+ Object* o,
+ ReferenceRepresentation reference_representation) {
+ CHECK(o->IsHeapObject());
+ HeapObject* heap_object = HeapObject::cast(o);
+
+ if (address_mapper_.IsMapped(heap_object)) {
+ int space = SpaceOfAlreadySerializedObject(heap_object);
+ int address = address_mapper_.MappedTo(heap_object);
+ SerializeReferenceToPreviousObject(space,
+ address,
+ reference_representation);
+ } else {
+ // Object has not yet been serialized. Serialize it here.
+ ObjectSerializer object_serializer(this,
+ heap_object,
+ sink_,
+ reference_representation);
+ object_serializer.Serialize();
+ }
+}
+
+
+void StartupSerializer::SerializeWeakReferences() {
+ for (int i = partial_snapshot_cache_length_;
+ i < kPartialSnapshotCacheCapacity;
+ i++) {
+ sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
+ sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
+ }
+ Heap::IterateWeakRoots(this, VISIT_ALL);
+}
+
+
+void PartialSerializer::SerializeObject(
+ Object* o,
+ ReferenceRepresentation reference_representation) {
+ CHECK(o->IsHeapObject());
+ HeapObject* heap_object = HeapObject::cast(o);
+
+ int root_index;
+ if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
+ sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
+ sink_->PutInt(root_index, "root_index");
+ return;
+ }
+
+ if (ShouldBeInThePartialSnapshotCache(heap_object)) {
+ int cache_index = PartialSnapshotCacheIndex(heap_object);
+ sink_->Put(PARTIAL_SNAPSHOT_CACHE_ENTRY, "PartialSnapshotCache");
+ sink_->PutInt(cache_index, "partial_snapshot_cache_index");
+ return;
+ }
+
+ // Pointers from the partial snapshot to the objects in the startup snapshot
+ // should go through the root array or through the partial snapshot cache.
+ // If this is not the case you may have to add something to the root array.
+ ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object));
+ // All the symbols that the partial snapshot needs should be either in the
+ // root table or in the partial snapshot cache.
+ ASSERT(!heap_object->IsSymbol());
+
+ if (address_mapper_.IsMapped(heap_object)) {
+ int space = SpaceOfAlreadySerializedObject(heap_object);
+ int address = address_mapper_.MappedTo(heap_object);
+ SerializeReferenceToPreviousObject(space,
+ address,
+ reference_representation);
} else {
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this,
@@ -1053,7 +1153,6 @@ void Serializer::SerializeObject(
}
-
void Serializer::ObjectSerializer::Serialize() {
int space = Serializer::SpaceOfObject(object_);
int size = object_->Size();
@@ -1066,11 +1165,12 @@ void Serializer::ObjectSerializer::Serialize() {
}
sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");
+ LOG(SnapshotPositionEvent(object_->address(), sink_->Position()));
+
// Mark this object as already serialized.
bool start_new_page;
- SerializationAddressMapper::Map(
- object_,
- serializer_->Allocate(space, size, &start_new_page));
+ int offset = serializer_->Allocate(space, size, &start_new_page);
+ serializer_->address_mapper()->AddMapping(object_, offset);
if (start_new_page) {
sink_->Put(START_NEW_PAGE_SERIALIZATION, "NewPage");
sink_->PutSection(space, "NewPageSpace");
@@ -1230,6 +1330,7 @@ int Serializer::Allocate(int space, int size, bool* new_page) {
// In large object space we merely number the objects instead of trying to
// determine some sort of address.
*new_page = true;
+ large_object_total_ += size;
return fullness_[LO_SPACE]++;
}
*new_page = false;
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index b32f4e811..ce3b0061c 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -147,6 +147,8 @@ class SnapshotByteSource {
return position_ == length_;
}
+ int position() { return position_; }
+
private:
const byte* data_;
int length_;
@@ -183,9 +185,14 @@ class SnapshotByteSource {
f(14, 32) \
f(15, 36)
-// The SerDes class is a common superclass for Serializer and Deserializer
-// which is used to store common constants and methods used by both.
-class SerDes: public ObjectVisitor {
+// SerializerDeserializer is a common superclass for Serializer and
+// Deserializer; it stores the constants and methods shared by
+// both.
+class SerializerDeserializer: public ObjectVisitor {
+ public:
+ static void Iterate(ObjectVisitor* visitor);
+ static void SetSnapshotCacheSize(int size);
+
protected:
enum DataType {
RAW_DATA_SERIALIZATION = 0,
@@ -200,7 +207,8 @@ class SerDes: public ObjectVisitor {
START_NEW_PAGE_SERIALIZATION = 37,
NATIVES_STRING_RESOURCE = 38,
ROOT_SERIALIZATION = 39,
- // Free: 40-47.
+ PARTIAL_SNAPSHOT_CACHE_ENTRY = 40,
+ // Free: 41-47.
BACKREF_SERIALIZATION = 48,
// One per space, must be kSpaceMask aligned.
// Free: 57-63.
@@ -225,20 +233,28 @@ class SerDes: public ObjectVisitor {
static inline bool SpaceIsPaged(int space) {
return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
}
+
+ static int partial_snapshot_cache_length_;
+ static const int kPartialSnapshotCacheCapacity = 1024;
+ static Object* partial_snapshot_cache_[];
};
// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
-class Deserializer: public SerDes {
+class Deserializer: public SerializerDeserializer {
public:
// Create a deserializer from a snapshot byte source.
explicit Deserializer(SnapshotByteSource* source);
- virtual ~Deserializer() { }
+ virtual ~Deserializer();
// Deserialize the snapshot into an empty heap.
void Deserialize();
+
+ // Deserialize a single object and the objects reachable from it.
+ void DeserializePartial(Object** root);
+
#ifdef DEBUG
virtual void Synchronize(const char* tag);
#endif
@@ -264,10 +280,10 @@ class Deserializer: public SerDes {
// (In large object space we are keeping track of individual objects
// rather than pages.) In new space we just need the address of the
// first object and the others will flow from that.
- List<Address> pages_[SerDes::kNumberOfSpaces];
+ List<Address> pages_[SerializerDeserializer::kNumberOfSpaces];
SnapshotByteSource* source_;
- ExternalReferenceDecoder* external_reference_decoder_;
+ static ExternalReferenceDecoder* external_reference_decoder_;
// This is the address of the next object that will be allocated in each
// space. It is used to calculate the addresses of back-references.
Address high_water_[LAST_SPACE + 1];
@@ -288,21 +304,71 @@ class SnapshotByteSink {
Put(byte, description);
}
void PutInt(uintptr_t integer, const char* description);
+ virtual int Position() = 0;
};
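
SnapshotByteSink now carries a pure virtual Position(), which the serializer uses for SnapshotPositionEvent logging, so every concrete sink must track how many bytes it has emitted. A minimal in-memory sink against the interface shown above (the abstract base is restated, and the subclass name invented, so the sketch stands alone):

    #include <cstdio>
    #include <vector>

    // Restated from serialize.h so this sketch compiles on its own.
    class SnapshotByteSink {
     public:
      virtual ~SnapshotByteSink() {}
      virtual void Put(int byte, const char* description) = 0;
      virtual int Position() = 0;
    };

    // Collects the snapshot in memory; Position() is just the byte count.
    class ListSnapshotByteSink : public SnapshotByteSink {
     public:
      void Put(int byte, const char* description) override {
        data_.push_back(static_cast<unsigned char>(byte));
      }
      int Position() override { return static_cast<int>(data_.size()); }

     private:
      std::vector<unsigned char> data_;
    };

    int main() {
      ListSnapshotByteSink sink;
      sink.Put(39, "RootSerialization");
      sink.Put(2, "root_index");
      std::printf("position: %d\n", sink.Position());
    }
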
-class Serializer : public SerDes {
+// Mapping objects to their location after deserialization.
+// This is used during building, but not at runtime by V8.
+class SerializationAddressMapper {
+ public:
+ SerializationAddressMapper()
+ : serialization_map_(new HashMap(&SerializationMatchFun)),
+ no_allocation_(new AssertNoAllocation()) { }
+
+ ~SerializationAddressMapper() {
+ delete serialization_map_;
+ delete no_allocation_;
+ }
+
+ bool IsMapped(HeapObject* obj) {
+ return serialization_map_->Lookup(Key(obj), Hash(obj), false) != NULL;
+ }
+
+ int MappedTo(HeapObject* obj) {
+ ASSERT(IsMapped(obj));
+ return static_cast<int>(reinterpret_cast<intptr_t>(
+ serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
+ }
+
+ void AddMapping(HeapObject* obj, int to) {
+ ASSERT(!IsMapped(obj));
+ HashMap::Entry* entry =
+ serialization_map_->Lookup(Key(obj), Hash(obj), true);
+ entry->value = Value(to);
+ }
+
+ private:
+ static bool SerializationMatchFun(void* key1, void* key2) {
+ return key1 == key2;
+ }
+
+ static uint32_t Hash(HeapObject* obj) {
+ return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
+ }
+
+ static void* Key(HeapObject* obj) {
+ return reinterpret_cast<void*>(obj->address());
+ }
+
+ static void* Value(int v) {
+ return reinterpret_cast<void*>(v);
+ }
+
+ HashMap* serialization_map_;
+ AssertNoAllocation* no_allocation_;
+ DISALLOW_COPY_AND_ASSIGN(SerializationAddressMapper);
+};
+
+
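
SerializationAddressMapper wraps V8's HashMap with identity matching on object addresses and stuffs the integer offset into the entry's void* value. For comparison, the same bookkeeping over standard containers (HeapObject reduced to an opaque pointer):

    #include <cassert>
    #include <unordered_map>

    // Only the object's address matters; the type is irrelevant here.
    using HeapObjectPtr = const void*;

    class AddressMapper {
     public:
      bool IsMapped(HeapObjectPtr obj) const { return map_.count(obj) != 0; }
      int MappedTo(HeapObjectPtr obj) const { return map_.at(obj); }
      void AddMapping(HeapObjectPtr obj, int to) {
        assert(!IsMapped(obj));
        map_[obj] = to;
      }

     private:
      std::unordered_map<HeapObjectPtr, int> map_;
    };

    int main() {
      int dummy;  // Any address works as a stand-in for a heap object.
      AddressMapper mapper;
      mapper.AddMapping(&dummy, 0x40);
      assert(mapper.IsMapped(&dummy) && mapper.MappedTo(&dummy) == 0x40);
    }
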
+class Serializer : public SerializerDeserializer {
public:
explicit Serializer(SnapshotByteSink* sink);
- // Serialize the current state of the heap.
- void Serialize();
- // Serialize a single object and the objects reachable from it.
- void SerializePartial(Object** obj);
void VisitPointers(Object** start, Object** end);
// You can call this after serialization to find out how much space was used
// in each space.
int CurrentAllocationAddress(int space) {
- if (SpaceIsLarge(space)) space = LO_SPACE;
+ if (SpaceIsLarge(space)) return large_object_total_;
return fullness_[space];
}
@@ -318,15 +384,20 @@ class Serializer : public SerDes {
// going on.
static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
static bool enabled() { return serialization_enabled_; }
+ SerializationAddressMapper* address_mapper() { return &address_mapper_; }
#ifdef DEBUG
virtual void Synchronize(const char* tag);
#endif
- private:
+ protected:
enum ReferenceRepresentation {
TAGGED_REPRESENTATION, // A tagged object reference.
CODE_TARGET_REPRESENTATION // A reference to first instruction in target.
};
+ static const int kInvalidRootIndex = -1;
+ virtual int RootIndex(HeapObject* heap_object) = 0;
+ virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
+
class ObjectSerializer : public ObjectVisitor {
public:
ObjectSerializer(Serializer* serializer,
@@ -362,7 +433,12 @@ class Serializer : public SerDes {
int bytes_processed_so_far_;
};
- void SerializeObject(Object* o, ReferenceRepresentation representation);
+ virtual void SerializeObject(Object* o,
+ ReferenceRepresentation representation) = 0;
+ void SerializeReferenceToPreviousObject(
+ int space,
+ int address,
+ ReferenceRepresentation reference_representation);
void InitializeAllocators();
// This will return the space for an object. If the object is in large
// object space it may return kLargeCode or kLargeFixedArray in order
@@ -377,8 +453,6 @@ class Serializer : public SerDes {
int EncodeExternalReference(Address addr) {
return external_reference_encoder_->Encode(addr);
}
- int RootIndex(HeapObject* heap_object);
- static const int kInvalidRootIndex = -1;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references. Large objects are
@@ -388,10 +462,11 @@ class Serializer : public SerDes {
SnapshotByteSink* sink_;
int current_root_index_;
ExternalReferenceEncoder* external_reference_encoder_;
- bool partial_;
static bool serialization_enabled_;
// Did we already make use of the fact that serialization was not enabled?
static bool too_late_to_enable_now_;
+ int large_object_total_;
+ SerializationAddressMapper address_mapper_;
friend class ObjectSerializer;
friend class Deserializer;
@@ -399,6 +474,62 @@ class Serializer : public SerDes {
DISALLOW_COPY_AND_ASSIGN(Serializer);
};
+
+class PartialSerializer : public Serializer {
+ public:
+ PartialSerializer(Serializer* startup_snapshot_serializer,
+ SnapshotByteSink* sink)
+ : Serializer(sink),
+ startup_serializer_(startup_snapshot_serializer) {
+ }
+
+ // Serialize the objects reachable from a single object pointer.
+ virtual void Serialize(Object** o);
+ virtual void SerializeObject(Object* o,
+ ReferenceRepresentation representation);
+
+ protected:
+ virtual int RootIndex(HeapObject* o);
+ virtual int PartialSnapshotCacheIndex(HeapObject* o);
+ virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
+ return o->IsString() || o->IsSharedFunctionInfo();
+ }
+
+ private:
+ Serializer* startup_serializer_;
+ DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
+};
+
+
+class StartupSerializer : public Serializer {
+ public:
+ explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) {
+ // Clear the cache of objects used by the partial snapshot. After the
+ // strong roots have been serialized we can create a partial snapshot
+ // which will repopulate the cache with objects needed by that partial
+ // snapshot.
+ partial_snapshot_cache_length_ = 0;
+ }
+ // Serialize the current state of the heap. The order is:
+ // 1) Strong references.
+ // 2) Partial snapshot cache.
+ // 3) Weak references (e.g. the symbol table).
+ virtual void SerializeStrongReferences();
+ virtual void SerializeObject(Object* o,
+ ReferenceRepresentation representation);
+ void SerializeWeakReferences();
+ void Serialize() {
+ SerializeStrongReferences();
+ SerializeWeakReferences();
+ }
+
+ private:
+ virtual int RootIndex(HeapObject* o) { return kInvalidRootIndex; }
+ virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
+ return false;
+ }
+};
+
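
The documented order (strong roots, then the partial snapshot that populates the cache, then the weak roots) suggests a driver along these lines. This is a sketch of how a snapshot-building tool such as mksnapshot might wire the two serializers together, not code from this patch; the sinks and the context root are assumed to exist:

    // Sketch only: startup_sink, partial_sink and context_root are assumed.
    StartupSerializer startup_serializer(&startup_sink);
    startup_serializer.SerializeStrongReferences();

    PartialSerializer partial_serializer(&startup_serializer, &partial_sink);
    partial_serializer.Serialize(&context_root);  // Fills the snapshot cache.

    // Pads the cache in the startup snapshot, then writes the weak roots.
    startup_serializer.SerializeWeakReferences();
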
} } // namespace v8::internal
#endif // V8_SERIALIZE_H_
diff --git a/deps/v8/src/snapshot-common.cc b/deps/v8/src/snapshot-common.cc
index c01baad79..1e81b8ece 100644
--- a/deps/v8/src/snapshot-common.cc
+++ b/deps/v8/src/snapshot-common.cc
@@ -59,39 +59,4 @@ bool Snapshot::Initialize(const char* snapshot_file) {
return false;
}
-
-class FileByteSink : public SnapshotByteSink {
- public:
- explicit FileByteSink(const char* snapshot_file) {
- fp_ = OS::FOpen(snapshot_file, "wb");
- if (fp_ == NULL) {
- PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
- exit(1);
- }
- }
- virtual ~FileByteSink() {
- if (fp_ != NULL) {
- fclose(fp_);
- }
- }
- virtual void Put(int byte, const char* description) {
- if (fp_ != NULL) {
- fputc(byte, fp_);
- }
- }
-
- private:
- FILE* fp_;
-};
-
-
-bool Snapshot::WriteToFile(const char* snapshot_file) {
- FileByteSink file(snapshot_file);
- Serializer ser(&file);
- ser.Serialize();
- return true;
-}
-
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h
index 847bb9ada..4fd8a6c8d 100644
--- a/deps/v8/src/spaces-inl.h
+++ b/deps/v8/src/spaces-inl.h
@@ -36,32 +36,6 @@ namespace internal {
// -----------------------------------------------------------------------------
-// HeapObjectIterator
-
-bool HeapObjectIterator::has_next() {
- if (cur_addr_ < cur_limit_) {
- return true; // common case
- }
- ASSERT(cur_addr_ == cur_limit_);
- return HasNextInNextPage(); // slow path
-}
-
-
-HeapObject* HeapObjectIterator::next() {
- ASSERT(has_next());
-
- HeapObject* obj = HeapObject::FromAddress(cur_addr_);
- int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
- ASSERT_OBJECT_SIZE(obj_size);
-
- cur_addr_ += obj_size;
- ASSERT(cur_addr_ <= cur_limit_);
-
- return obj;
-}
-
-
-// -----------------------------------------------------------------------------
// PageIterator
bool PageIterator::has_next() {
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index cd0939800..2c495d852 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -82,8 +82,8 @@ void HeapObjectIterator::Initialize(Address cur, Address end,
}
-bool HeapObjectIterator::HasNextInNextPage() {
- if (cur_addr_ == end_addr_) return false;
+HeapObject* HeapObjectIterator::FromNextPage() {
+ if (cur_addr_ == end_addr_) return NULL;
Page* cur_page = Page::FromAllocationTop(cur_addr_);
cur_page = cur_page->next_page();
@@ -92,12 +92,12 @@ bool HeapObjectIterator::HasNextInNextPage() {
cur_addr_ = cur_page->ObjectAreaStart();
cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
- if (cur_addr_ == end_addr_) return false;
+ if (cur_addr_ == end_addr_) return NULL;
ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
Verify();
#endif
- return true;
+ return FromCurrentPage();
}
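
The iterator protocol changes here from a has_next()/next() pair to a single next() that returns NULL when exhausted, halving the virtual calls per object when iterating through the ObjectIterator interface. A standalone version of the new idiom:

    #include <cstdio>

    struct Obj { int size; };

    // Iterates a plain array; next() returns nullptr when exhausted,
    // matching the new HeapObjectIterator/SemiSpaceIterator protocol.
    class Iterator {
     public:
      Iterator(Obj* begin, Obj* end) : cur_(begin), end_(end) {}
      Obj* next() { return (cur_ < end_) ? cur_++ : nullptr; }

     private:
      Obj* cur_;
      Obj* end_;
    };

    int main() {
      Obj objects[] = {{8}, {16}, {24}};
      Iterator it(objects, objects + 3);
      // The loop shape used throughout this patch:
      for (Obj* obj = it.next(); obj != nullptr; obj = it.next())
        std::printf("size %d\n", obj->size);
    }
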
@@ -357,12 +357,18 @@ void* MemoryAllocator::AllocateRawMemory(const size_t requested,
}
int alloced = static_cast<int>(*allocated);
size_ += alloced;
+#ifdef DEBUG
+ ZapBlock(reinterpret_cast<Address>(mem), alloced);
+#endif
Counters::memory_allocated.Increment(alloced);
return mem;
}
void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
+#ifdef DEBUG
+ ZapBlock(reinterpret_cast<Address>(mem), length);
+#endif
if (CodeRange::contains(static_cast<Address>(mem))) {
CodeRange::FreeRawMemory(mem, length);
} else {
@@ -446,6 +452,9 @@ Page* MemoryAllocator::CommitPages(Address start, size_t size,
if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
return Page::FromAddress(NULL);
}
+#ifdef DEBUG
+ ZapBlock(start, size);
+#endif
Counters::memory_allocated.Increment(static_cast<int>(size));
// So long as we correctly overestimated the number of chunks we should not
@@ -467,10 +476,14 @@ bool MemoryAllocator::CommitBlock(Address start,
ASSERT(InInitialChunk(start + size - 1));
if (!initial_chunk_->Commit(start, size, executable)) return false;
+#ifdef DEBUG
+ ZapBlock(start, size);
+#endif
Counters::memory_allocated.Increment(static_cast<int>(size));
return true;
}
+
bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
ASSERT(start != NULL);
ASSERT(size > 0);
@@ -483,6 +496,14 @@ bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
return true;
}
+
+void MemoryAllocator::ZapBlock(Address start, size_t size) {
+ for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
+ Memory::Address_at(start + s) = kZapValue;
+ }
+}
+
+
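
ZapBlock centralizes the debug-only pattern of filling freed or freshly committed memory with kZapValue, so stale pointers show up as a recognizable pattern in a debugger instead of plausible-looking data. A standalone sketch (the zap constant here is a stand-in; V8 defines its own kZapValue):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Stand-in for V8's kZapValue; any recognizable non-NULL pattern works.
    constexpr uintptr_t kZap = 0xdeadbeefu;

    void ZapBlock(void* start, size_t size) {
      uintptr_t* p = static_cast<uintptr_t*>(start);
      for (size_t s = 0; s + sizeof(uintptr_t) <= size; s += sizeof(uintptr_t))
        *p++ = kZap;
    }

    int main() {
      uintptr_t block[4];
      ZapBlock(block, sizeof(block));
      // A stale read now yields the zap pattern instead of old contents.
      std::printf("%#lx\n", static_cast<unsigned long>(block[0]));
    }
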
Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
PagedSpace* owner) {
ASSERT(IsValidChunk(chunk_id));
@@ -1437,7 +1458,8 @@ void NewSpace::ClearHistograms() {
void NewSpace::CollectStatistics() {
ClearHistograms();
SemiSpaceIterator it(this);
- while (it.has_next()) RecordAllocation(it.next());
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+ RecordAllocation(obj);
}
@@ -1598,9 +1620,7 @@ void OldSpaceFreeList::RebuildSizeList() {
int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
#ifdef DEBUG
- for (int i = 0; i < size_in_bytes; i += kPointerSize) {
- Memory::Address_at(start + i) = kZapValue;
- }
+ MemoryAllocator::ZapBlock(start, size_in_bytes);
#endif
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(size_in_bytes);
@@ -1732,9 +1752,7 @@ void FixedSizeFreeList::Reset() {
void FixedSizeFreeList::Free(Address start) {
#ifdef DEBUG
- for (int i = 0; i < object_size_; i += kPointerSize) {
- Memory::Address_at(start + i) = kZapValue;
- }
+ MemoryAllocator::ZapBlock(start, object_size_);
#endif
// We only use the freelists with mark-sweep.
ASSERT(!MarkCompactCollector::IsCompacting());
@@ -2054,8 +2072,7 @@ static void CollectCommentStatistics(RelocIterator* it) {
// - by code comment
void PagedSpace::CollectCodeStatistics() {
HeapObjectIterator obj_it(this);
- while (obj_it.has_next()) {
- HeapObject* obj = obj_it.next();
+ for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
if (obj->IsCode()) {
Code* code = Code::cast(obj);
code_kind_statistics[code->kind()] += code->Size();
@@ -2157,7 +2174,8 @@ void OldSpace::ReportStatistics() {
ClearHistograms();
HeapObjectIterator obj_it(this);
- while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
+ for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
+ CollectHistogramInfo(obj);
ReportHistogram(true);
}
@@ -2393,7 +2411,8 @@ void FixedSpace::ReportStatistics() {
ClearHistograms();
HeapObjectIterator obj_it(this);
- while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
+ for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
+ CollectHistogramInfo(obj);
ReportHistogram(false);
}
@@ -2462,7 +2481,8 @@ LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
HeapObject* LargeObjectIterator::next() {
- ASSERT(has_next());
+ if (current_ == NULL) return NULL;
+
HeapObject* object = current_->GetObject();
current_ = current_->next();
return object;
@@ -2639,8 +2659,7 @@ void LargeObjectSpace::ClearRSet() {
ASSERT(Page::is_rset_in_use());
LargeObjectIterator it(this);
- while (it.has_next()) {
- HeapObject* object = it.next();
+ for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
// We only have code, sequential strings, or fixed arrays in large
// object space, and only fixed arrays need remembered set support.
if (object->IsFixedArray()) {
@@ -2668,11 +2687,10 @@ void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
30);
LargeObjectIterator it(this);
- while (it.has_next()) {
+ for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
// We only have code, sequential strings, or fixed arrays in large
// object space, and only fixed arrays can possibly contain pointers to
// the young generation.
- HeapObject* object = it.next();
if (object->IsFixedArray()) {
// Iterate the normal page remembered set range.
Page* page = Page::FromAddress(object->address());
@@ -2718,9 +2736,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
}
// Free the chunk.
- if (object->IsCode()) {
- LOG(CodeDeleteEvent(object->address()));
- }
+ MarkCompactCollector::ReportDeleteIfNeeded(object);
size_ -= static_cast<int>(chunk_size);
page_count_--;
MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
@@ -2800,8 +2816,8 @@ void LargeObjectSpace::Verify() {
void LargeObjectSpace::Print() {
LargeObjectIterator it(this);
- while (it.has_next()) {
- it.next()->Print();
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+ obj->Print();
}
}
@@ -2811,9 +2827,9 @@ void LargeObjectSpace::ReportStatistics() {
int num_objects = 0;
ClearHistograms();
LargeObjectIterator it(this);
- while (it.has_next()) {
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
num_objects++;
- CollectHistogramInfo(it.next());
+ CollectHistogramInfo(obj);
}
PrintF(" number of objects %d\n", num_objects);
@@ -2823,8 +2839,7 @@ void LargeObjectSpace::ReportStatistics() {
void LargeObjectSpace::CollectCodeStatistics() {
LargeObjectIterator obj_it(this);
- while (obj_it.has_next()) {
- HeapObject* obj = obj_it.next();
+ for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
if (obj->IsCode()) {
Code* code = Code::cast(obj);
code_kind_statistics[code->kind()] += code->Size();
@@ -2835,8 +2850,7 @@ void LargeObjectSpace::CollectCodeStatistics() {
void LargeObjectSpace::PrintRSet() {
LargeObjectIterator it(this);
- while (it.has_next()) {
- HeapObject* object = it.next();
+ for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
if (object->IsFixedArray()) {
Page* page = Page::FromAddress(object->address());
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index 4786fb4dd..850a72366 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -438,13 +438,16 @@ class MemoryAllocator : public AllStatic {
// and false otherwise.
static bool CommitBlock(Address start, size_t size, Executability executable);
-
// Uncommit a contiguous block of memory [start..(start+size)[.
// start is not NULL, the size is greater than zero, and the
// block is contained in the initial chunk. Returns true if it succeeded
// and false otherwise.
static bool UncommitBlock(Address start, size_t size);
+ // Zaps a contiguous block of memory [start..(start+size)[ thus
+ // filling it up with a recognizable non-NULL bit pattern.
+ static void ZapBlock(Address start, size_t size);
+
// Attempts to allocate the requested (non-zero) number of pages from the
// OS. Fewer pages might be allocated than requested. If it fails to
// allocate memory for the OS or cannot allocate a single page, this
@@ -597,15 +600,14 @@ class MemoryAllocator : public AllStatic {
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
-// NOTE: The space specific object iterators also implements the own has_next()
-// and next() methods which are used to avoid using virtual functions
+// NOTE: The space-specific object iterators also implement their own next()
+// method, which is used to avoid virtual function calls when
// iterating a specific space.
class ObjectIterator : public Malloced {
public:
virtual ~ObjectIterator() { }
- virtual bool has_next_object() = 0;
virtual HeapObject* next_object() = 0;
};
@@ -645,11 +647,11 @@ class HeapObjectIterator: public ObjectIterator {
Address start,
HeapObjectCallback size_func);
- inline bool has_next();
- inline HeapObject* next();
+ inline HeapObject* next() {
+ return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
+ }
// implementation of ObjectIterator.
- virtual bool has_next_object() { return has_next(); }
virtual HeapObject* next_object() { return next(); }
private:
@@ -659,9 +661,21 @@ class HeapObjectIterator: public ObjectIterator {
HeapObjectCallback size_func_; // size function
Page* end_page_; // caches the page of the end address
- // Slow path of has_next, checks whether there are more objects in
- // the next page.
- bool HasNextInNextPage();
+ HeapObject* FromCurrentPage() {
+ ASSERT(cur_addr_ < cur_limit_);
+
+ HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+ int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+ ASSERT_OBJECT_SIZE(obj_size);
+
+ cur_addr_ += obj_size;
+ ASSERT(cur_addr_ <= cur_limit_);
+
+ return obj;
+ }
+
+ // Slow path of next, goes into the next page.
+ HeapObject* FromNextPage();
// Initializes fields.
void Initialize(Address start, Address end, HeapObjectCallback size_func);
@@ -982,6 +996,18 @@ class PagedSpace : public Space {
return Page::FromAllocationTop(alloc_info.limit);
}
+ int CountPagesToTop() {
+ Page* p = Page::FromAllocationTop(allocation_info_.top);
+ PageIterator it(this, PageIterator::ALL_PAGES);
+ int counter = 1;
+ while (it.has_next()) {
+ if (it.next() == p) return counter;
+ counter++;
+ }
+ UNREACHABLE();
+ return -1;
+ }
+
// Expands the space by allocating a fixed number of pages. Returns false if
// it cannot allocate requested number of pages from OS. Newly allocated
// pages are append to the last_page;
@@ -1194,10 +1220,8 @@ class SemiSpaceIterator : public ObjectIterator {
SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
SemiSpaceIterator(NewSpace* space, Address start);
- bool has_next() {return current_ < limit_; }
-
HeapObject* next() {
- ASSERT(has_next());
+ if (current_ == limit_) return NULL;
HeapObject* object = HeapObject::FromAddress(current_);
int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
@@ -1207,7 +1231,6 @@ class SemiSpaceIterator : public ObjectIterator {
}
// Implementation of the ObjectIterator functions.
- virtual bool has_next_object() { return has_next(); }
virtual HeapObject* next_object() { return next(); }
private:
@@ -1753,8 +1776,11 @@ class FixedSpace : public PagedSpace {
class MapSpace : public FixedSpace {
public:
// Creates a map space object with a maximum capacity.
- MapSpace(int max_capacity, AllocationSpace id)
- : FixedSpace(max_capacity, id, Map::kSize, "map") {}
+ MapSpace(int max_capacity, int max_map_space_pages, AllocationSpace id)
+ : FixedSpace(max_capacity, id, Map::kSize, "map"),
+ max_map_space_pages_(max_map_space_pages) {
+ ASSERT(max_map_space_pages < kMaxMapPageIndex);
+ }
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact(bool will_compact);
@@ -1762,24 +1788,21 @@ class MapSpace : public FixedSpace {
// Given an index, returns the page address.
Address PageAddress(int page_index) { return page_addresses_[page_index]; }
- // Constants.
- static const int kMaxMapPageIndex = (1 << MapWord::kMapPageIndexBits) - 1;
+ static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
// Are map pointers encodable into map word?
bool MapPointersEncodable() {
if (!FLAG_use_big_map_space) {
- ASSERT(CountTotalPages() <= kMaxMapPageIndex);
+ ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
return true;
}
- int n_of_pages = Capacity() / Page::kObjectAreaSize;
- ASSERT(n_of_pages == CountTotalPages());
- return n_of_pages <= kMaxMapPageIndex;
+ return CountPagesToTop() <= max_map_space_pages_;
}
// Should be called after forced sweep to find out if map space needs
// compaction.
bool NeedsCompaction(int live_maps) {
- return !MapPointersEncodable() && live_maps <= kCompactionThreshold;
+ return !MapPointersEncodable() && live_maps <= CompactionThreshold();
}
Address TopAfterCompaction(int live_maps) {
@@ -1817,7 +1840,7 @@ class MapSpace : public FixedSpace {
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
- int actual_size = 0;
+ intptr_t actual_size = 0;
for (Page* p = first_page_; p != top_page; p = p->next_page())
actual_size += kMapsPerPage * Map::kSize;
actual_size += (new_top - top_page->ObjectAreaStart());
@@ -1838,10 +1861,14 @@ class MapSpace : public FixedSpace {
static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
// Do map space compaction if there is a page gap.
- static const int kCompactionThreshold = kMapsPerPage * (kMaxMapPageIndex - 1);
+ int CompactionThreshold() {
+ return kMapsPerPage * (max_map_space_pages_ - 1);
+ }
+
+ const int max_map_space_pages_;
// An array of page start address in a map space.
- Address page_addresses_[kMaxMapPageIndex + 1];
+ Address page_addresses_[kMaxMapPageIndex];
public:
TRACK_MEMORY("MapSpace")
@@ -2036,11 +2063,9 @@ class LargeObjectIterator: public ObjectIterator {
explicit LargeObjectIterator(LargeObjectSpace* space);
LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
- bool has_next() { return current_ != NULL; }
HeapObject* next();
// implementation of ObjectIterator.
- virtual bool has_next_object() { return has_next(); }
virtual HeapObject* next_object() { return next(); }
private:
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 9ab83beba..81f89fd4b 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -1058,6 +1058,19 @@ Object* StubCompiler::GetCodeWithFlags(Code::Flags flags, String* name) {
return GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
}
+void StubCompiler::LookupPostInterceptor(JSObject* holder,
+ String* name,
+ LookupResult* lookup) {
+ holder->LocalLookupRealNamedProperty(name, lookup);
+ if (lookup->IsNotFound()) {
+ Object* proto = holder->GetPrototype();
+ if (proto != Heap::null_value()) {
+ proto->Lookup(name, lookup);
+ }
+ }
+}
+
+
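
LookupPostInterceptor looks for a real (non-interceptor) property on the holder first and only falls back to the prototype chain when nothing local is found. A toy model of that two-step lookup over map-based objects (the names are illustrative):

    #include <cstdio>
    #include <map>
    #include <string>

    struct Obj {
      std::map<std::string, int> props;
      const Obj* proto = nullptr;  // nullptr plays the role of Heap::null_value().
    };

    // Local properties first, then the prototype chain -- the shape of
    // StubCompiler::LookupPostInterceptor.
    const int* Lookup(const Obj* holder, const std::string& name) {
      for (const Obj* o = holder; o != nullptr; o = o->proto) {
        auto it = o->props.find(name);
        if (it != o->props.end()) return &it->second;
      }
      return nullptr;
    }

    int main() {
      Obj proto{{{"x", 1}}};
      Obj holder{{{"y", 2}}, &proto};
      const int* x = Lookup(&holder, "x");
      std::printf("x=%d\n", x ? *x : -1);  // Found on the prototype.
    }
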
Object* LoadStubCompiler::GetCode(PropertyType type, String* name) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 2418c1f76..d97fe7732 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -435,6 +435,10 @@ class StubCompiler BASE_EMBEDDED {
String* name,
Label* miss);
+ static void LookupPostInterceptor(JSObject* holder,
+ String* name,
+ LookupResult* lookup);
+
private:
HandleScope scope_;
MacroAssembler masm_;
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index fb1e9265e..7397c3044 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -60,40 +60,44 @@ namespace internal {
// lines) rather than one macro (of length about 80 lines) to work around
// this problem. Please avoid using recursive macros of this length when
// possible.
-#define STATS_COUNTER_LIST_1(SC) \
- /* Global Handle Count*/ \
- SC(global_handles, V8.GlobalHandles) \
- /* Mallocs from PCRE */ \
- SC(pcre_mallocs, V8.PcreMallocCount) \
- /* OS Memory allocated */ \
- SC(memory_allocated, V8.OsMemoryAllocated) \
- SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \
- SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \
- SC(alive_after_last_gc, V8.AliveAfterLastGC) \
- SC(objs_since_last_young, V8.ObjsSinceLastYoung) \
- SC(objs_since_last_full, V8.ObjsSinceLastFull) \
- SC(symbol_table_capacity, V8.SymbolTableCapacity) \
- SC(number_of_symbols, V8.NumberOfSymbols) \
- SC(script_wrappers, V8.ScriptWrappers) \
- SC(call_initialize_stubs, V8.CallInitializeStubs) \
- SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \
- SC(call_normal_stubs, V8.CallNormalStubs) \
- SC(call_megamorphic_stubs, V8.CallMegamorphicStubs) \
- SC(arguments_adaptors, V8.ArgumentsAdaptors) \
- SC(compilation_cache_hits, V8.CompilationCacheHits) \
- SC(compilation_cache_misses, V8.CompilationCacheMisses) \
- SC(regexp_cache_hits, V8.RegExpCacheHits) \
- SC(regexp_cache_misses, V8.RegExpCacheMisses) \
- /* Amount of evaled source code. */ \
- SC(total_eval_size, V8.TotalEvalSize) \
- /* Amount of loaded source code. */ \
- SC(total_load_size, V8.TotalLoadSize) \
- /* Amount of parsed source code. */ \
- SC(total_parse_size, V8.TotalParseSize) \
- /* Amount of source code skipped over using preparsing. */ \
- SC(total_preparse_skipped, V8.TotalPreparseSkipped) \
- /* Amount of compiled source code. */ \
- SC(total_compile_size, V8.TotalCompileSize)
+#define STATS_COUNTER_LIST_1(SC) \
+ /* Global Handle Count*/ \
+ SC(global_handles, V8.GlobalHandles) \
+ /* Mallocs from PCRE */ \
+ SC(pcre_mallocs, V8.PcreMallocCount) \
+ /* OS Memory allocated */ \
+ SC(memory_allocated, V8.OsMemoryAllocated) \
+ SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \
+ SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \
+ SC(alive_after_last_gc, V8.AliveAfterLastGC) \
+ SC(objs_since_last_young, V8.ObjsSinceLastYoung) \
+ SC(objs_since_last_full, V8.ObjsSinceLastFull) \
+ SC(symbol_table_capacity, V8.SymbolTableCapacity) \
+ SC(number_of_symbols, V8.NumberOfSymbols) \
+ SC(script_wrappers, V8.ScriptWrappers) \
+ SC(call_initialize_stubs, V8.CallInitializeStubs) \
+ SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \
+ SC(call_normal_stubs, V8.CallNormalStubs) \
+ SC(call_megamorphic_stubs, V8.CallMegamorphicStubs) \
+ SC(arguments_adaptors, V8.ArgumentsAdaptors) \
+ SC(compilation_cache_hits, V8.CompilationCacheHits) \
+ SC(compilation_cache_misses, V8.CompilationCacheMisses) \
+ SC(regexp_cache_hits, V8.RegExpCacheHits) \
+ SC(regexp_cache_misses, V8.RegExpCacheMisses) \
+ /* Amount of evaled source code. */ \
+ SC(total_eval_size, V8.TotalEvalSize) \
+ /* Amount of loaded source code. */ \
+ SC(total_load_size, V8.TotalLoadSize) \
+ /* Amount of parsed source code. */ \
+ SC(total_parse_size, V8.TotalParseSize) \
+ /* Amount of source code skipped over using preparsing. */ \
+ SC(total_preparse_skipped, V8.TotalPreparseSkipped) \
+ /* Amount of compiled source code. */ \
+ SC(total_compile_size, V8.TotalCompileSize) \
+ /* Amount of source code compiled with the old codegen. */ \
+ SC(total_old_codegen_source_size, V8.TotalOldCodegenSourceSize) \
+ /* Amount of source code compiled with the full codegen. */ \
+ SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize)
#define STATS_COUNTER_LIST_2(SC) \
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 700b9e47e..74750653d 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -197,7 +197,7 @@ $Object.prototype.constructor = $Object;
// ECMA-262 - 15.2.4.2
function ObjectToString() {
- return "[object " + %_ClassOf(this) + "]";
+ return "[object " + %_ClassOf(ToObject(this)) + "]";
}
@@ -209,7 +209,7 @@ function ObjectToLocaleString() {
// ECMA-262 - 15.2.4.4
function ObjectValueOf() {
- return this;
+ return ToObject(this);
}
@@ -276,7 +276,7 @@ function ObjectLookupSetter(name) {
function ObjectKeys(obj) {
if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
- throw MakeTypeError('object_keys_non_object', [obj]);
+ throw MakeTypeError("obj_ctor_property_non_object", ["keys"]);
return %LocalKeys(obj);
}
@@ -493,23 +493,59 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
// ES5 section 15.2.3.2.
function ObjectGetPrototypeOf(obj) {
- if (!IS_OBJECT(obj) && !IS_FUNCTION(obj)) {
- throw MakeTypeError("object_get_prototype_non_object", [obj]);
- }
- return obj.__proto__;
+ if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+ throw MakeTypeError("obj_ctor_property_non_object", ["getPrototypeOf"]);
+ return obj.__proto__;
}
// ES5 section 15.2.3.3
function ObjectGetOwnPropertyDescriptor(obj, p) {
- if (!IS_OBJECT(obj) && !IS_FUNCTION(obj)) {
- throw MakeTypeError("object_get_prototype_non_object", [obj]);
- }
+ if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+ throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyDescriptor"]);
var desc = GetOwnProperty(obj, p);
return FromPropertyDescriptor(desc);
}
+// ES5 section 15.2.3.4.
+function ObjectGetOwnPropertyNames(obj) {
+ if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+ throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyNames"]);
+
+ // Find all the indexed properties.
+
+ // Get the local element names.
+ var propertyNames = %GetLocalElementNames(obj);
+
+ // Get names for indexed interceptor properties.
+ if (%GetInterceptorInfo(obj) & 1) {
+ var indexedInterceptorNames =
+ %GetIndexedInterceptorElementNames(obj);
+ if (indexedInterceptorNames) {
+ propertyNames = propertyNames.concat(indexedInterceptorNames);
+ }
+ }
+
+ // Find all the named properties.
+
+ // Get the local property names.
+ propertyNames = propertyNames.concat(%GetLocalPropertyNames(obj));
+
+ // Get names for named interceptor properties if any.
+
+ if (%GetInterceptorInfo(obj) & 2) {
+ var namedInterceptorNames =
+ %GetNamedInterceptorPropertyNames(obj);
+ if (namedInterceptorNames) {
+ propertyNames = propertyNames.concat(namedInterceptorNames);
+ }
+ }
+
+ return propertyNames;
+}
+
+
// ES5 section 15.2.3.5.
function ObjectCreate(proto, properties) {
if (!IS_OBJECT(proto) && !IS_NULL(proto)) {
@@ -576,7 +612,8 @@ function SetupObject() {
"keys", ObjectKeys,
"create", ObjectCreate,
"getPrototypeOf", ObjectGetPrototypeOf,
- "getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor
+ "getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
+ "getOwnPropertyNames", ObjectGetOwnPropertyNames
));
}
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 00052d1cb..7583d1ca0 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
-#define MINOR_VERSION 0
-#define BUILD_NUMBER 6
-#define PATCH_LEVEL 1
+#define MINOR_VERSION 1
+#define BUILD_NUMBER 0
+#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build the put a specific SONAME into the
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 2d524eaf4..9cfe98abe 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -1537,6 +1537,40 @@ void Assembler::movzxwl(Register dst, const Operand& src) {
}
+void Assembler::repmovsb() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3);
+ emit(0xA4);
+}
+
+
+void Assembler::repmovsw() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66); // Operand size override.
+ emit(0xF3);
+ emit(0xA4);
+}
+
+
+void Assembler::repmovsl() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3);
+ emit(0xA5);
+}
+
+
+void Assembler::repmovsq() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3);
+ emit_rex_64();
+ emit(0xA5);
+}
+
+
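
These helpers emit x64 REP string moves, copying RCX units from [RSI] to [RDI]: 0xF3 is the REP prefix, 0xA4 is MOVSB, and 0xA5 is MOVS at word, dword or qword size depending on a 0x66 operand-size override or a REX.W prefix. Note that repmovsw() as written emits 0xA4 (a byte move under a 0x66 override) where a true word move would be 0xA5; the sketch below emits the conventional encodings. A standalone emitter producing the byte sequences (the function name is invented):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Emits REP MOVS for a given unit size, as Assembler::repmovs{b,w,l,q} do.
    void EmitRepMovs(std::vector<uint8_t>* code, int unit_size) {
      if (unit_size == 2) code->push_back(0x66);  // Operand-size override.
      code->push_back(0xF3);                      // REP prefix.
      if (unit_size == 8) code->push_back(0x48);  // REX.W: 64-bit operand size.
      code->push_back(unit_size == 1 ? 0xA4 : 0xA5);  // MOVSB vs. MOVSW/D/Q.
    }

    int main() {
      for (int size : {1, 2, 4, 8}) {
        std::vector<uint8_t> code;
        EmitRepMovs(&code, size);
        std::printf("repmovs x%d:", size);
        for (uint8_t b : code) std::printf(" %02X", b);
        std::printf("\n");
      }
    }
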
void Assembler::mul(Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1880,6 +1914,20 @@ void Assembler::testb(const Operand& op, Immediate mask) {
}
+void Assembler::testb(const Operand& op, Register reg) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (reg.code() > 3) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(reg, op);
+ } else {
+ emit_optional_rex_32(reg, op);
+ }
+ emit(0x84);
+ emit_operand(reg, op);
+}
+
+
void Assembler::testl(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2065,6 +2113,16 @@ void Assembler::fisttp_s(const Operand& adr) {
}
+void Assembler::fisttp_d(const Operand& adr) {
+ ASSERT(CpuFeatures::IsEnabled(SSE3));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(adr);
+ emit(0xDD);
+ emit_operand(1, adr);
+}
+
+
void Assembler::fist_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index fa7d33b1b..3f2aef0e4 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -574,6 +574,13 @@ class Assembler : public Malloced {
void movzxwq(Register dst, const Operand& src);
void movzxwl(Register dst, const Operand& src);
+ // Repeated moves.
+
+ void repmovsb();
+ void repmovsw();
+ void repmovsl();
+ void repmovsq();
+
// New x64 instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(void* ptr, RelocInfo::Mode rmode);
void load_rax(ExternalReference ext);
@@ -738,6 +745,9 @@ class Assembler : public Malloced {
arithmetic_op_32(0x23, dst, src);
}
+ void andb(Register dst, Immediate src) {
+ immediate_arithmetic_op_8(0x4, dst, src);
+ }
void decq(Register dst);
void decq(const Operand& dst);
@@ -916,6 +926,10 @@ class Assembler : public Malloced {
arithmetic_op_32(0x2B, dst, src);
}
+ void subl(Register dst, const Operand& src) {
+ arithmetic_op_32(0x2B, dst, src);
+ }
+
void subl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x5, dst, src);
}
@@ -931,6 +945,7 @@ class Assembler : public Malloced {
void testb(Register dst, Register src);
void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask);
+ void testb(const Operand& op, Register reg);
void testl(Register dst, Register src);
void testl(Register reg, Immediate mask);
void testl(const Operand& op, Immediate mask);
@@ -1047,6 +1062,7 @@ class Assembler : public Malloced {
void fistp_d(const Operand& adr);
void fisttp_s(const Operand& adr);
+ void fisttp_d(const Operand& adr);
void fabs();
void fchs();
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index f444d2cf8..0b95bba60 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -34,16 +34,36 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
- // TODO(428): Don't pass the function in a static variable.
- ExternalReference passed = ExternalReference::builtin_passed_function();
- __ movq(kScratchRegister, passed.address(), RelocInfo::EXTERNAL_REFERENCE);
- __ movq(Operand(kScratchRegister, 0), rdi);
-
- // The actual argument count has already been loaded into register
- // rax, but JumpToRuntime expects rax to contain the number of
- // arguments including the receiver.
- __ incq(rax);
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ CFunctionId id,
+ BuiltinExtraArguments extra_args) {
+ // ----------- S t a t e -------------
+ // -- rax : number of arguments excluding receiver
+ // -- rdi : called function (only guaranteed when
+ // extra_args requires it)
+ // -- rsi : context
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -- ...
+ // -- rsp[8 * argc] : first argument (argc == rax)
+ // -- rsp[8 * (argc +1)] : receiver
+ // -----------------------------------
+
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ __ pop(kScratchRegister); // Save return address.
+ __ push(rdi);
+ __ push(kScratchRegister); // Restore return address.
+ } else {
+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+ // JumpToRuntime expects rax to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ addq(rax, Immediate(num_extra_args + 1));
__ JumpToRuntime(ExternalReference(id), 1);
}
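
To pass the called function as an extra argument, the stub pops the return address, pushes rdi, and pushes the return address back, so the callee sees one more stack argument in the right place. The same shuffle on a vector-modeled stack:

    #include <cstdio>
    #include <vector>

    int main() {
      // The stack grows by push_back; back() is the top of stack (rsp).
      std::vector<const char*> stack = {"receiver", "arg0", "ret_addr"};
      const char* ret = stack.back();  // __ pop(kScratchRegister): save return address.
      stack.pop_back();
      stack.push_back("function(rdi)");  // __ push(rdi): insert the extra argument.
      stack.push_back(ret);              // __ push(kScratchRegister): restore it.
      for (auto it = stack.rbegin(); it != stack.rend(); ++it)
        std::printf("%s\n", *it);  // Print from the top of the stack down.
    }
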
@@ -888,7 +908,8 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
}
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function) {
// Enter a construct frame.
__ EnterConstructFrame();
@@ -1091,8 +1112,17 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ j(greater_equal, &loop);
// Call the function.
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ if (is_api_function) {
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ Handle<Code> code = Handle<Code>(
+ Builtins::builtin(Builtins::HandleApiCallConstruct));
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, CALL_FUNCTION);
+ } else {
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ }
// Restore context from the frame.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -1129,6 +1159,16 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
}
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, true);
+}
+
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Expects five C++ function parameters.
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index e912bbcff..685c9286d 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -33,6 +33,7 @@
#include "debug.h"
#include "ic-inl.h"
#include "parser.h"
+#include "regexp-macro-assembler.h"
#include "register-allocator-inl.h"
#include "scopes.h"
@@ -207,50 +208,51 @@ class FloatingPointHelper : public AllStatic {
// Code pattern for loading floating point values. Input values must
// be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
+ // operand_1 in rdx, operand_2 in rax; Returns operands as
// floating point numbers in XMM registers.
static void LoadFloatOperands(MacroAssembler* masm,
XMMRegister dst1,
XMMRegister dst2);
+ // Similar to LoadFloatOperands, assumes that the operands are smis.
+ static void LoadFloatOperandsFromSmis(MacroAssembler* masm,
+ XMMRegister dst1,
+ XMMRegister dst2);
+
// Code pattern for loading floating point values onto the fp stack.
// Input values must be either smi or heap number objects (fp values).
// Requirements:
// Register version: operands in registers lhs and rhs.
// Stack version: operands on TOS+1 and TOS+2.
// Returns operands as floating point numbers on fp stack.
- static void LoadFloatOperands(MacroAssembler* masm);
static void LoadFloatOperands(MacroAssembler* masm,
Register lhs,
Register rhs);
- // Code pattern for loading a floating point value and converting it
- // to a 32 bit integer. Input value must be either a smi or a heap number
- // object.
- // Returns operands as 32-bit sign extended integers in a general purpose
- // registers.
- static void LoadInt32Operand(MacroAssembler* masm,
- const Operand& src,
- Register dst);
-
// Test if operands are smi or number objects (fp). Requirements:
// operand_1 in rax, operand_2 in rdx; falls through on float or smi
// operands, jumps to the non_float label otherwise.
static void CheckNumberOperands(MacroAssembler* masm,
Label* non_float);
+
+ // Takes the operands in rdx and rax and loads them as integers in rax
+ // and rcx.
+ static void LoadAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* operand_conversion_failure);
};
// -----------------------------------------------------------------------------
// CodeGenerator implementation.
-CodeGenerator::CodeGenerator(int buffer_size,
+CodeGenerator::CodeGenerator(MacroAssembler* masm,
Handle<Script> script,
bool is_eval)
: is_eval_(is_eval),
script_(script),
deferred_(8),
- masm_(new MacroAssembler(NULL, buffer_size)),
+ masm_(masm),
scope_(NULL),
frame_(NULL),
allocator_(NULL),
@@ -276,7 +278,9 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void CodeGenerator::GenCode(FunctionLiteral* function) {
+void CodeGenerator::Generate(FunctionLiteral* function,
+ Mode mode,
+ CompilationInfo* info) {
// Record the position for debugging purposes.
CodeForFunctionPosition(function);
ZoneList<Statement*>* body = function->body();
@@ -292,7 +296,7 @@ void CodeGenerator::GenCode(FunctionLiteral* function) {
set_in_spilled_code(false);
// Adjust for function-level loop nesting.
- loop_nesting_ += function->loop_nesting();
+ loop_nesting_ += info->loop_nesting();
JumpTarget::set_compiling_deferred_code(false);
@@ -316,96 +320,106 @@ void CodeGenerator::GenCode(FunctionLiteral* function) {
// rdi: called JS function
// rsi: callee's context
allocator_->Initialize();
- frame_->Enter();
- // Allocate space for locals and initialize them.
- frame_->AllocateStackSlots();
- // Initialize the function return target after the locals are set
- // up, because it needs the expected frame height from the frame.
- function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
- function_return_is_shadowed_ = false;
-
- // Allocate the local context if needed.
- int heap_slots = scope_->num_heap_slots();
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ allocate local context");
- // Allocate local context.
- // Get outer context and create a new context based on it.
- frame_->PushFunction();
- Result context;
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- context = frame_->CallStub(&stub, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kNewContext, 1);
- }
+ if (mode == PRIMARY) {
+ frame_->Enter();
+
+ // Allocate space for locals and initialize them.
+ frame_->AllocateStackSlots();
+
+ // Allocate the local context if needed.
+ int heap_slots = scope_->num_heap_slots();
+ if (heap_slots > 0) {
+ Comment cmnt(masm_, "[ allocate local context");
+ // Allocate local context.
+ // Get outer context and create a new context based on it.
+ frame_->PushFunction();
+ Result context;
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ context = frame_->CallStub(&stub, 1);
+ } else {
+ context = frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
- // Update context local.
- frame_->SaveContextRegister();
+ // Update context local.
+ frame_->SaveContextRegister();
- // Verify that the runtime call result and rsi agree.
- if (FLAG_debug_code) {
- __ cmpq(context.reg(), rsi);
- __ Assert(equal, "Runtime::NewContext should end up in rsi");
+ // Verify that the runtime call result and rsi agree.
+ if (FLAG_debug_code) {
+ __ cmpq(context.reg(), rsi);
+ __ Assert(equal, "Runtime::NewContext should end up in rsi");
+ }
}
- }
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
-
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope_->num_parameters(); i++) {
- Variable* par = scope_->parameter(i);
- Slot* slot = par->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- // The use of SlotOperand below is safe in unspilled code
- // because the slot is guaranteed to be a context slot.
- //
- // There are no parameters in the global scope.
- ASSERT(!scope_->is_global_scope());
- frame_->PushParameterAt(i);
- Result value = frame_->Pop();
- value.ToRegister();
-
- // SlotOperand loads context.reg() with the context object
- // stored to, used below in RecordWrite.
- Result context = allocator_->Allocate();
- ASSERT(context.is_valid());
- __ movq(SlotOperand(slot, context.reg()), value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- frame_->Spill(context.reg());
- frame_->Spill(value.reg());
- __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
+ // TODO(1241774): Improve this code:
+ // 1) only needed if we have a context
+ // 2) no need to recompute context ptr every single time
+ // 3) don't copy parameter operand code from SlotOperand!
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, it must be the last argument
+ // passed to the parameter that needs to be copied. This is a rare
+ // case so we don't check for it, instead we rely on the copying
+ // order: such a parameter is copied repeatedly into the same
+ // context location and thus the last value is what is seen inside
+ // the function.
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ Variable* par = scope_->parameter(i);
+ Slot* slot = par->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ // The use of SlotOperand below is safe in unspilled code
+ // because the slot is guaranteed to be a context slot.
+ //
+ // There are no parameters in the global scope.
+ ASSERT(!scope_->is_global_scope());
+ frame_->PushParameterAt(i);
+ Result value = frame_->Pop();
+ value.ToRegister();
+
+ // SlotOperand loads context.reg() with the context object
+ // stored to, used below in RecordWrite.
+ Result context = allocator_->Allocate();
+ ASSERT(context.is_valid());
+ __ movq(SlotOperand(slot, context.reg()), value.reg());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ frame_->Spill(context.reg());
+ frame_->Spill(value.reg());
+ __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
+ }
}
}
- }
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
+ // Store the arguments object. This must happen after context
+ // initialization because the arguments object may be stored in
+ // the context.
+ if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
+ StoreArgumentsObject(true);
+ }
- // Initialize ThisFunction reference if present.
- if (scope_->is_function_scope() && scope_->function() != NULL) {
- frame_->Push(Factory::the_hole_value());
- StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ // Initialize ThisFunction reference if present.
+ if (scope_->is_function_scope() && scope_->function() != NULL) {
+ frame_->Push(Factory::the_hole_value());
+ StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ }
+ } else {
+ // When used as the secondary compiler for splitting, rbp, rsi,
+ // and rdi have been pushed on the stack. Adjust the virtual
+ // frame to match this state.
+ frame_->Adjust(3);
+ allocator_->Unuse(rdi);
}
+ // Initialize the function return target after the locals are set
+ // up, because it needs the expected frame height from the frame.
+ function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+ function_return_is_shadowed_ = false;
+
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
@@ -470,7 +484,7 @@ void CodeGenerator::GenCode(FunctionLiteral* function) {
}
// Adjust for function-level loop nesting.
- loop_nesting_ -= function->loop_nesting();
+ loop_nesting_ -= info->loop_nesting();
// Code generation state must be reset.
ASSERT(state_ == NULL);
@@ -654,20 +668,29 @@ void DeferredReferenceSetKeyedValue::Generate() {
}
-void CodeGenerator::CallApplyLazy(Property* apply,
+void CodeGenerator::CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position) {
+ // An optimized implementation of expressions of the form
+ // x.apply(y, arguments).
+ // If the arguments object of the scope has not been allocated,
+ // and x.apply is Function.prototype.apply, this optimization
+ // just copies y and the arguments of the current function on the
+ // stack, as receiver and arguments, and calls x.
+ // In the implementation comments, we call x the applicand
+ // and y the receiver.
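+ // For example, a delegation pattern such as
+ //   function run() { return target.apply(this, arguments); }
+ // takes this fast path: no arguments object is ever allocated for
+ // 'run'; the caller's actual parameters are re-pushed and 'target'
+ // is invoked directly ('run' and 'target' are illustrative names).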
ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
ASSERT(arguments->IsArguments());
- JumpTarget slow, done;
-
- // Load the apply function onto the stack. This will usually
+ // Load applicand.apply onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
- Reference ref(this, apply);
- ref.GetValue();
- ASSERT(ref.type() == Reference::NAMED);
+ Load(applicand);
+ Handle<String> name = Factory::LookupAsciiSymbol("apply");
+ frame()->Push(name);
+ Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
+ __ nop();
+ frame()->Push(&answer);
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
@@ -677,6 +700,11 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// Emit the source position information after having loaded the
// receiver and the arguments.
CodeForSourcePosition(position);
+ // Contents of frame at this point:
+ // Frame[0]: arguments object of the current function or the hole.
+ // Frame[1]: receiver
+ // Frame[2]: applicand.apply
+ // Frame[3]: applicand.
// Check if the arguments object has been lazily allocated
// already. If so, just use that instead of copying the arguments
@@ -684,143 +712,149 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// named 'arguments' has been introduced.
frame_->Dup();
Result probe = frame_->Pop();
- bool try_lazy = true;
- if (probe.is_constant()) {
- try_lazy = probe.handle()->IsTheHole();
- } else {
- __ Cmp(probe.reg(), Factory::the_hole_value());
- probe.Unuse();
- slow.Branch(not_equal);
- }
-
- if (try_lazy) {
- JumpTarget build_args;
-
- // Get rid of the arguments object probe.
- frame_->Drop();
-
- // Before messing with the execution stack, we sync all
- // elements. This is bound to happen anyway because we're
- // about to call a function.
- frame_->SyncRange(0, frame_->element_count() - 1);
+ { VirtualFrame::SpilledScope spilled_scope;
+ Label slow, done;
+ bool try_lazy = true;
+ if (probe.is_constant()) {
+ try_lazy = probe.handle()->IsTheHole();
+ } else {
+ __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
+ probe.Unuse();
+ __ j(not_equal, &slow);
+ }
- // Check that the receiver really is a JavaScript object.
- {
- frame_->PushElementAt(0);
- Result receiver = frame_->Pop();
- receiver.ToRegister();
- Condition is_smi = masm_->CheckSmi(receiver.reg());
- build_args.Branch(is_smi);
+ if (try_lazy) {
+ Label build_args;
+ // Get rid of the arguments object probe.
+ frame_->Drop(); // Can be called on a spilled frame.
+ // The stack now has 3 elements on it:
+ // rsp[0]: receiver
+ // rsp[1]: applicand.apply
+ // rsp[2]: applicand.
+
+ // Check that the receiver really is a JavaScript object.
+ __ movq(rax, Operand(rsp, 0));
+ Condition is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, &build_args);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
- build_args.Branch(below);
- }
-
- // Verify that we're invoking Function.prototype.apply.
- {
- frame_->PushElementAt(1);
- Result apply = frame_->Pop();
- apply.ToRegister();
- Condition is_smi = masm_->CheckSmi(apply.reg());
- build_args.Branch(is_smi);
- Result tmp = allocator_->Allocate();
- __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
- build_args.Branch(not_equal);
- __ movq(tmp.reg(),
- FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &build_args);
+
+ // Check that applicand.apply is Function.prototype.apply.
+ __ movq(rax, Operand(rsp, kPointerSize));
+ is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, &build_args);
+ __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &build_args);
+ __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ Cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
- apply_code);
- build_args.Branch(not_equal);
- }
-
- // Get the function receiver from the stack. Check that it
- // really is a function.
- __ movq(rdi, Operand(rsp, 2 * kPointerSize));
- Condition is_smi = masm_->CheckSmi(rdi);
- build_args.Branch(is_smi);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- build_args.Branch(not_equal);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adapted);
-
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ movq(rax, Immediate(scope_->num_parameters()));
- for (int i = 0; i < scope_->num_parameters(); i++) {
- __ push(frame_->ParameterAt(i));
+ __ Cmp(FieldOperand(rax, SharedFunctionInfo::kCodeOffset), apply_code);
+ __ j(not_equal, &build_args);
+
+ // Check that applicand is a function.
+ __ movq(rdi, Operand(rsp, 2 * kPointerSize));
+ is_smi = masm_->CheckSmi(rdi);
+ __ j(is_smi, &build_args);
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &build_args);
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ Label invoke, adapted;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adapted);
+
+ // No arguments adaptor frame. Copy fixed number of arguments.
+ __ movq(rax, Immediate(scope_->num_parameters()));
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ __ push(frame_->ParameterAt(i));
+ }
+ __ jmp(&invoke);
+
+ // Arguments adaptor frame present. Copy arguments from there, but
+ // limit how many we copy to avoid stack overflow.
+ __ bind(&adapted);
+ static const uint32_t kArgumentsLimit = 1 * KB;
+ __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToInteger32(rax, rax);
+ __ movq(rcx, rax);
+ __ cmpq(rax, Immediate(kArgumentsLimit));
+ __ j(above, &build_args);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack. We don't inform the virtual frame of the push, so we don't
+ // have to worry about getting rid of the elements from the virtual
+ // frame.
+ Label loop;
+ // rcx is a small non-negative integer, due to the test above.
+ __ testl(rcx, rcx);
+ __ j(zero, &invoke);
+ __ bind(&loop);
+ __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
+ __ decl(rcx);
+ __ j(not_zero, &loop);
+
+ // Invoke the function.
+ __ bind(&invoke);
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ // Drop applicand.apply and applicand from the stack, and push
+ // the result of the function call, but leave the spilled frame
+ // unchanged, with 3 elements, so it is correct when we compile the
+ // slow-case code.
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ __ push(rax);
+ // Stack now has 1 element:
+ // rsp[0]: result
+ __ jmp(&done);
+
+ // Slow-case: Allocate the arguments object since we know it isn't
+ // there, and fall-through to the slow-case where we call
+ // applicand.apply.
+ __ bind(&build_args);
+ // Stack now has 3 elements, because we jumped here from a point where:
+ // rsp[0]: receiver
+ // rsp[1]: applicand.apply
+ // rsp[2]: applicand.
+
+ // StoreArgumentsObject requires a correct frame, and may modify it.
+ Result arguments_object = StoreArgumentsObject(false);
+ frame_->SpillAll();
+ arguments_object.ToRegister();
+ frame_->EmitPush(arguments_object.reg());
+ arguments_object.Unuse();
+ // Stack and frame now have 4 elements.
+ __ bind(&slow);
}
- __ jmp(&invoke);
-
- // Arguments adaptor frame present. Copy arguments from there, but
- // avoid copying too many arguments to avoid stack overflows.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToInteger32(rax, rax);
- __ movq(rcx, rax);
- __ cmpq(rax, Immediate(kArgumentsLimit));
- build_args.Branch(above);
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- __ testl(rcx, rcx);
- __ j(zero, &invoke);
- __ bind(&loop);
- __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
- __ decl(rcx);
- __ j(not_zero, &loop);
-
- // Invoke the function. The virtual frame knows about the receiver
- // so make sure to forget that explicitly.
- __ bind(&invoke);
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
- frame_->Forget(1);
- Result result = allocator()->Allocate(rax);
- frame_->SetElementAt(0, &result);
- done.Jump();
-
- // Slow-case: Allocate the arguments object since we know it isn't
- // there, and fall-through to the slow-case where we call
- // Function.prototype.apply.
- build_args.Bind();
- Result arguments_object = StoreArgumentsObject(false);
- frame_->Push(&arguments_object);
- slow.Bind();
- }
-
- // Flip the apply function and the function to call on the stack, so
- // the function looks like the receiver of the apply call. This way,
- // the generic Function.prototype.apply implementation can deal with
- // the call like it usually does.
- Result a2 = frame_->Pop();
- Result a1 = frame_->Pop();
- Result ap = frame_->Pop();
- Result fn = frame_->Pop();
- frame_->Push(&ap);
- frame_->Push(&fn);
- frame_->Push(&a1);
- frame_->Push(&a2);
- CallFunctionStub call_function(2, NOT_IN_LOOP);
- Result res = frame_->CallStub(&call_function, 3);
- frame_->Push(&res);
-
- // All done. Restore context register after call.
- if (try_lazy) done.Bind();
+ // Generic computation of x.apply(y, args) with no special optimization.
+ // Flip applicand.apply and applicand on the stack, so
+ // applicand looks like the receiver of the applicand.apply call.
+ // Then process it as a normal function call.
+ __ movq(rax, Operand(rsp, 3 * kPointerSize));
+ __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ __ movq(Operand(rsp, 2 * kPointerSize), rax);
+ __ movq(Operand(rsp, 3 * kPointerSize), rbx);
+
+ CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
+ Result res = frame_->CallStub(&call_function, 3);
+ // The function and its two arguments have been dropped.
+ frame_->Drop(1); // Drop the receiver as well.
+ res.ToRegister();
+ frame_->EmitPush(res.reg());
+ // Stack now has 1 element:
+ // rsp[0]: result
+ if (try_lazy) __ bind(&done);
+ } // End of spilled scope.
+ // Restore the context register after a call.
frame_->RestoreContextRegister();
}
@@ -1817,28 +1851,20 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
if (!each.is_illegal()) {
if (each.size() > 0) {
frame_->EmitPush(frame_->ElementAt(each.size()));
- }
- // If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (eg, ebx pushed above) is
- // right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT);
- if (each.size() > 0) {
- // It's safe to pop the value lying on top of the reference before
- // unloading the reference itself (which preserves the top of stack,
- // ie, now the topmost value of the non-zero sized reference), since
- // we will discard the top of stack after unloading the reference
- // anyway.
- frame_->Drop();
+ each.SetValue(NOT_CONST_INIT);
+ frame_->Drop(2); // Drop the original and the copy of the element.
+ } else {
+ // If the reference has size zero then we can use the value below
+ // the reference as if it were on top of it, instead of pushing
+ // a new copy above the reference.
+ each.SetValue(NOT_CONST_INIT);
+ frame_->Drop(); // Drop the original of the element.
}
}
}
// Unloading a reference may leave the frame in an unspilled state.
frame_->SpillAll();
- // Discard the i'th entry pushed above or else the remainder of the
- // reference, whichever is currently on top of the stack.
- frame_->Drop();
-
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
@@ -2198,7 +2224,9 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Spill everything, even constants, to the frame.
frame_->SpillAll();
- frame_->CallRuntime(Runtime::kDebugBreak, 0);
+
+ DebuggerStatementStub ces;
+ frame_->CallStub(&ces, 0);
// Ignore the return value.
#endif
}
@@ -2423,8 +2451,6 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Load(property->value());
frame_->Push(key);
Result ignored = frame_->CallStoreIC();
- // Drop the duplicated receiver and ignore the result.
- frame_->Drop();
break;
}
// Fall through
@@ -2549,7 +2575,7 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
void CodeGenerator::VisitAssignment(Assignment* node) {
Comment cmnt(masm_, "[ Assignment");
- { Reference target(this, node->target());
+ { Reference target(this, node->target(), node->is_compound());
if (target.is_illegal()) {
// Fool the virtual frame into thinking that we left the assignment's
// value on the frame.
@@ -2571,12 +2597,27 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
frame_->PushElementAt(target.size() - 1);
Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
}
+ if (node->ends_initialization_block()) {
+ // Add an extra copy of the receiver to the frame, so that it can be
+ // converted back to fast case after the assignment.
+ ASSERT(target.type() == Reference::NAMED ||
+ target.type() == Reference::KEYED);
+ if (target.type() == Reference::NAMED) {
+ frame_->Dup();
+ // Dup target receiver on stack.
+ } else {
+ ASSERT(target.type() == Reference::KEYED);
+ Result temp = frame_->Pop();
+ frame_->Dup();
+ frame_->Push(&temp);
+ }
+ }
if (node->op() == Token::ASSIGN ||
node->op() == Token::INIT_VAR ||
node->op() == Token::INIT_CONST) {
Load(node->value());
- } else {
+ } else { // Assignment is a compound assignment.
Literal* literal = node->value()->AsLiteral();
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
@@ -2602,6 +2643,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
var->mode() == Variable::CONST &&
node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
// Assignment ignored - leave the value on the stack.
+ UnloadReference(&target);
} else {
CodeForSourcePosition(node->position());
if (node->op() == Token::INIT_CONST) {
@@ -2613,13 +2655,15 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
target.SetValue(NOT_CONST_INIT);
}
if (node->ends_initialization_block()) {
- ASSERT(target.type() == Reference::NAMED ||
- target.type() == Reference::KEYED);
+ ASSERT(target.type() == Reference::UNLOADED);
// End of initialization block. Revert to fast case. The
- // argument to the runtime call is the receiver, which is the
- // first value pushed as part of the reference, which is below
- // the lhs value.
- frame_->PushElementAt(target.size());
+ // argument to the runtime call is the extra copy of the receiver,
+ // which is below the value of the assignment.
+ // Swap the receiver and the value of the assignment expression.
+ Result lhs = frame_->Pop();
+ Result receiver = frame_->Pop();
+ frame_->Push(&lhs);
+ frame_->Push(&receiver);
Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
}
}
@@ -2706,7 +2750,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Call the function.
CodeForSourcePosition(node->position());
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
+ CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
result = frame_->CallStub(&call_function, arg_count + 1);
// Restore the context and overwrite the function on the stack with
@@ -2767,7 +2811,7 @@ void CodeGenerator::VisitCall(Call* node) {
frame_->EmitPush(rdx);
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
} else if (property != NULL) {
// Check if the key is a literal string.
@@ -2787,7 +2831,7 @@ void CodeGenerator::VisitCall(Call* node) {
args->at(1)->AsVariableProxy()->IsArguments()) {
// Use the optimized Function.prototype.apply that avoids
// allocating lazily allocated arguments objects.
- CallApplyLazy(property,
+ CallApplyLazy(property->obj(),
args->at(0),
args->at(1)->AsVariableProxy(),
node->position());
@@ -2819,20 +2863,28 @@ void CodeGenerator::VisitCall(Call* node) {
// -------------------------------------------
// Load the function to call from the property through a reference.
- Reference ref(this, property);
- ref.GetValue();
-
- // Pass receiver to called function.
if (property->is_synthetic()) {
+ Reference ref(this, property, false);
+ ref.GetValue();
// Use global object as receiver.
LoadGlobalReceiver();
} else {
- // The reference's size is non-negative.
- frame_->PushElementAt(ref.size());
+ Reference ref(this, property, false);
+ ASSERT(ref.size() == 2);
+ Result key = frame_->Pop();
+ frame_->Dup(); // Duplicate the receiver.
+ frame_->Push(&key);
+ ref.GetValue();
+ // Top of frame contains function to call, with duplicate copy of
+ // receiver below it. Swap them.
+ Result function = frame_->Pop();
+ Result receiver = frame_->Pop();
+ frame_->Push(&function);
+ frame_->Push(&receiver);
}
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
}
} else {
@@ -2847,7 +2899,7 @@ void CodeGenerator::VisitCall(Call* node) {
LoadGlobalReceiver();
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
}
}
@@ -3012,6 +3064,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
}
} else {
+ bool overwrite =
+ (node->expression()->AsBinaryOperation() != NULL &&
+ node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
Load(node->expression());
switch (op) {
case Token::NOT:
@@ -3021,9 +3076,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break;
case Token::SUB: {
- bool overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
GenericUnaryOpStub stub(Token::SUB, overwrite);
// TODO(1222589): remove dependency of TOS being cached inside stub
Result operand = frame_->Pop();
@@ -3042,10 +3094,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Condition is_smi = masm_->CheckSmi(operand.reg());
smi_label.Branch(is_smi, &operand);
- frame_->Push(&operand); // undo popping of TOS
- Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
- CALL_FUNCTION, 1);
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ Result answer = frame_->CallStub(&stub, &operand);
continue_label.Jump(&answer);
+
smi_label.Bind(&answer);
answer.ToRegister();
frame_->Spill(answer.reg());
@@ -3167,7 +3219,9 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
// value will be in the frame to be spilled.
if (is_postfix) frame_->Push(Smi::FromInt(0));
- { Reference target(this, node->expression());
+ // A reference to a constant is never stored to, so it does not need
+ // to persist after the get; it is not a compound-assignment reference.
+ { Reference target(this, node->expression(), !is_const);
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
@@ -3622,6 +3676,22 @@ void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
}
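+
+// An object is "undetectable" if its map has the kIsUndetectable bit
+// set; host objects such as document.all use this to masquerade as
+// undefined in type tests.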
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ destination()->false_target()->Branch(is_smi);
+ __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ __ movzxbl(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kBitFieldOffset));
+ __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
+ obj.Unuse();
+ destination()->Split(not_zero);
+}
+
+
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
@@ -3902,7 +3972,8 @@ void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
Load(args->at(1));
Load(args->at(2));
Load(args->at(3));
- Result result = frame_->CallRuntime(Runtime::kRegExpExec, 4);
+ RegExpExecStub stub;
+ Result result = frame_->CallStub(&stub, 4);
frame_->Push(&result);
}
@@ -3926,7 +3997,8 @@ void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
Load(args->at(1));
Load(args->at(2));
- Result answer = frame_->CallRuntime(Runtime::kSubString, 3);
+ SubStringStub stub;
+ Result answer = frame_->CallStub(&stub, 3);
frame_->Push(&answer);
}
@@ -3937,7 +4009,8 @@ void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
Load(args->at(0));
Load(args->at(1));
- Result answer = frame_->CallRuntime(Runtime::kStringCompare, 2);
+ StringCompareStub stub;
+ Result answer = frame_->CallStub(&stub, 2);
frame_->Push(&answer);
}
@@ -4238,14 +4311,19 @@ bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
//------------------------------------------------------------------------------
// CodeGenerator implementation of variables, lookups, and stores.
-Reference::Reference(CodeGenerator* cgen, Expression* expression)
- : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+Reference::Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get)
+ : cgen_(cgen),
+ expression_(expression),
+ type_(ILLEGAL),
+ persist_after_get_(persist_after_get) {
cgen->LoadReference(this);
}
Reference::~Reference() {
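+ // A reference must be unloaded explicitly before destruction: GetValue
+ // unloads it unless persist_after_get was requested, and SetValue and
+ // UnloadReference mark it unloaded themselves.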
- cgen_->UnloadReference(this);
+ ASSERT(is_unloaded() || is_illegal());
}
@@ -4295,6 +4373,7 @@ void CodeGenerator::UnloadReference(Reference* ref) {
// Pop a reference from the stack while preserving TOS.
Comment cmnt(masm_, "[ UnloadReference");
frame_->Nip(ref->size());
+ ref->set_unloaded();
}
@@ -5013,31 +5092,6 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
return;
}
- // Set the flags based on the operation, type and loop nesting level.
- GenericBinaryFlags flags;
- switch (op) {
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- // Bit operations always assume they likely operate on Smis. Still only
- // generate the inline Smi check code if this operation is part of a loop.
- flags = (loop_nesting() > 0)
- ? NO_SMI_CODE_IN_STUB
- : NO_GENERIC_BINARY_FLAGS;
- break;
-
- default:
- // By default only inline the Smi check code for likely smis if this
- // operation is part of a loop.
- flags = ((loop_nesting() > 0) && type->IsLikelySmi())
- ? NO_SMI_CODE_IN_STUB
- : NO_GENERIC_BINARY_FLAGS;
- break;
- }
-
Result right = frame_->Pop();
Result left = frame_->Pop();
@@ -5071,7 +5125,6 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
- bool generate_no_smi_code = false; // No smi code at all, inline or in stub.
if (left_is_smi && right_is_smi) {
// Compute the constant result at compile time, and leave it on the frame.
@@ -5080,34 +5133,30 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
if (FoldConstantSmis(op, left_int, right_int)) return;
}
+ Result answer;
if (left_is_non_smi || right_is_non_smi) {
- // Set flag so that we go straight to the slow case, with no smi code.
- generate_no_smi_code = true;
+ GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
+ answer = stub.GenerateCall(masm_, frame_, &left, &right);
} else if (right_is_smi) {
- ConstantSmiBinaryOperation(op, &left, right.handle(),
- type, false, overwrite_mode);
- return;
+ answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
+ type, false, overwrite_mode);
} else if (left_is_smi) {
- ConstantSmiBinaryOperation(op, &right, left.handle(),
- type, true, overwrite_mode);
- return;
- }
-
- if ((flags & NO_SMI_CODE_IN_STUB) != 0 && !generate_no_smi_code) {
- LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+ answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
+ type, true, overwrite_mode);
} else {
- frame_->Push(&left);
- frame_->Push(&right);
- // If we know the arguments aren't smis, use the binary operation stub
- // that does not check for the fast smi case.
- // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
- if (generate_no_smi_code) {
- flags = NO_SMI_CODE_IN_STUB;
+ // Set the flags based on the operation, type and loop nesting level.
+ // Bit operations always assume they likely operate on Smis. Still only
+ // generate the inline Smi check code if this operation is part of a loop.
+ // For all other operations only inline the Smi check code for likely smis
+ // if the operation is part of a loop.
+ if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
+ answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+ } else {
+ GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
+ answer = stub.GenerateCall(masm_, frame_, &left, &right);
}
- GenericBinaryOpStub stub(op, overwrite_mode, flags);
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
}
+ frame_->Push(&answer);
}
@@ -5188,12 +5237,12 @@ void DeferredInlineSmiOperation::Generate() {
}
-void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
- Result* operand,
- Handle<Object> value,
- StaticType* type,
- bool reversed,
- OverwriteMode overwrite_mode) {
+Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> value,
+ StaticType* type,
+ bool reversed,
+ OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
// of the operands is a constant smi.
@@ -5204,20 +5253,19 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
if (IsUnsafeSmi(value)) {
Result unsafe_operand(value);
if (reversed) {
- LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+ return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
overwrite_mode);
} else {
- LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+ return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
overwrite_mode);
}
- ASSERT(!operand->is_valid());
- return;
}
// Get the literal value.
Smi* smi_value = Smi::cast(*value);
int int_value = smi_value->value();
+ Result answer;
switch (op) {
case Token::ADD: {
operand->ToRegister();
@@ -5238,15 +5286,15 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
smi_value,
deferred->entry_label());
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break;
}
case Token::SUB: {
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
operand->ToRegister();
frame_->Spill(operand->reg());
@@ -5260,7 +5308,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
smi_value,
deferred->entry_label());
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
}
break;
}
@@ -5268,8 +5316,8 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
case Token::SAR:
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -5287,21 +5335,21 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->reg(),
shift_value);
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
}
break;
case Token::SHR:
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
int shift_value = int_value & 0x1f;
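+ // (ECMAScript takes shift counts modulo 32; for example, 1 << 33
+ // evaluates to 2. That is why only the low 5 bits are kept.)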
operand->ToRegister();
- Result answer = allocator()->Allocate();
+ answer = allocator()->Allocate();
ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
@@ -5316,15 +5364,14 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
deferred->entry_label());
deferred->BindExit();
operand->Unuse();
- frame_->Push(&answer);
}
break;
case Token::SHL:
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -5341,10 +5388,10 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
overwrite_mode);
__ JumpIfNotSmi(operand->reg(), deferred->entry_label());
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
} else {
// Use a fresh temporary for nonzero shift values.
- Result answer = allocator()->Allocate();
+ answer = allocator()->Allocate();
ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
@@ -5359,7 +5406,6 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
deferred->entry_label());
deferred->BindExit();
operand->Unuse();
- frame_->Push(&answer);
}
}
break;
@@ -5394,7 +5440,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
}
}
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break;
}
@@ -5422,7 +5468,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Smi::FromInt(int_value - 1));
}
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break; // This break only applies if we generated code for MOD.
}
// Fall through if we did not find a power of 2 on the right hand side!
@@ -5431,22 +5477,24 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
default: {
Result constant_operand(value);
if (reversed) {
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
- LikelySmiBinaryOperation(op, operand, &constant_operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
+ overwrite_mode);
}
break;
}
}
- ASSERT(!operand->is_valid());
+ ASSERT(answer.is_valid());
+ return answer;
}
-void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode) {
+Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode) {
+ Result answer;
// Special handling of div and mod because they use fixed registers.
if (op == Token::DIV || op == Token::MOD) {
// We need rax as the quotient register, rdx as the remainder
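+ // (This matches the x86-64 idiv instruction, which divides rdx:rax and
+ // leaves the quotient in rax and the remainder in rdx.)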
@@ -5528,16 +5576,17 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&quotient);
+ answer = quotient;
} else {
ASSERT(op == Token::MOD);
__ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&remainder);
+ answer = remainder;
}
- return;
+ ASSERT(answer.is_valid());
+ return answer;
}
// Special handling of shift operations because they use fixed
@@ -5558,7 +5607,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
frame_->Spill(rcx);
// Use a fresh answer register to avoid spilling the left operand.
- Result answer = allocator_->Allocate();
+ answer = allocator_->Allocate();
ASSERT(answer.is_valid());
// Check that both operands are smis using the answer register as a
// temporary.
@@ -5597,8 +5646,8 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&answer);
- return;
+ ASSERT(answer.is_valid());
+ return answer;
}
// Handle the other binary operations.
@@ -5607,7 +5656,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// A newly allocated register answer is used to hold the answer. The
// registers containing left and right are not modified so they don't
// need to be spilled in the fast case.
- Result answer = allocator_->Allocate();
+ answer = allocator_->Allocate();
ASSERT(answer.is_valid());
// Perform the smi tag check.
@@ -5661,7 +5710,122 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&answer);
+ ASSERT(answer.is_valid());
+ return answer;
+}
+
+
+Result CodeGenerator::EmitKeyedLoad(bool is_global) {
+ Comment cmnt(masm_, "[ Load from keyed Property");
+ // Inline array load code if inside of a loop. We do not know
+ // the receiver map yet, so we initially generate the code with
+ // a check against an invalid map. In the inline cache code, we
+ // patch the map check if appropriate.
+ if (loop_nesting() > 0) {
+ Comment cmnt(masm_, "[ Inlined load from keyed Property");
+
+ Result key = frame_->Pop();
+ Result receiver = frame_->Pop();
+ key.ToRegister();
+ receiver.ToRegister();
+
+ // Use a fresh temporary to load the elements without destroying
+ // the receiver which is needed for the deferred slow case.
+ Result elements = allocator()->Allocate();
+ ASSERT(elements.is_valid());
+
+ // Use a fresh temporary for the index and later the loaded
+ // value.
+ Result index = allocator()->Allocate();
+ ASSERT(index.is_valid());
+
+ DeferredReferenceGetKeyedValue* deferred =
+ new DeferredReferenceGetKeyedValue(index.reg(),
+ receiver.reg(),
+ key.reg(),
+ is_global);
+
+ // Check that the receiver is not a smi (only needed if this
+ // is not a load from the global context) and that it has the
+ // expected map.
+ if (!is_global) {
+ __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+ }
+
+ // Initially, use an invalid map. The map is patched in the IC
+ // initialization code.
+ __ bind(deferred->patch_site());
+ // Use masm-> here instead of the double underscore macro since extra
+ // coverage code can interfere with the patching. Do not use the
+ // root array to load null_value, since it must be patched with
+ // the expected receiver map.
+ masm_->movq(kScratchRegister, Factory::null_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ deferred->Branch(not_equal);
+
+ // Check that the key is a non-negative smi.
+ __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
+
+ // Get the elements array from the receiver and check that it
+ // is not a dictionary.
+ __ movq(elements.reg(),
+ FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+ Factory::fixed_array_map());
+ deferred->Branch(not_equal);
+
+ // Shift the key to get the actual index value and check that
+ // it is within bounds.
+ __ SmiToInteger32(index.reg(), key.reg());
+ __ cmpl(index.reg(),
+ FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+ deferred->Branch(above_equal);
+
+ // The index register holds the un-smi-tagged key. It has been
+ // zero-extended to 64-bits, so it can be used directly as index in the
+ // operand below.
+ // Load and check that the result is not the hole. We could
+ // reuse the index or elements register for the value.
+ //
+ // TODO(206): Consider whether it makes sense to try some
+ // heuristic about which register to reuse. For example, if
+ // one is rax, then we can reuse that one because the value
+ // coming from the deferred code will be in rax.
+ Result value = index;
+ __ movq(value.reg(),
+ Operand(elements.reg(),
+ index.reg(),
+ times_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ elements.Unuse();
+ index.Unuse();
+ __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
+ deferred->Branch(equal);
+ __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+ deferred->BindExit();
+ // Restore the receiver and key to the frame and push the
+ // result on top of it.
+ frame_->Push(&receiver);
+ frame_->Push(&key);
+ return value;
+
+ } else {
+ Comment cmnt(masm_, "[ Load from keyed Property");
+ RelocInfo::Mode mode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ Result answer = frame_->CallKeyedLoadIC(mode);
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed load. The explicit nop instruction is here because
+ // the push that follows might be peep-hole optimized away.
+ __ nop();
+ return answer;
+ }
}
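+
+// In rough C++ pseudocode, the inlined fast path above amounts to the
+// following (an illustrative sketch; smi tagging and the deferred
+// slow-case wiring are omitted):
+//
+//   if (!receiver->IsSmi() && receiver->map() == patched_map &&
+//       key->IsNonNegativeSmi()) {
+//     FixedArray* elements = FixedArray::cast(receiver->elements());
+//     if (elements->map() == Heap::fixed_array_map() &&
+//         index < elements->length()) {
+//       Object* value = elements->get(index);
+//       if (!value->IsTheHole()) return value;  // Inline cache hit.
+//     }
+//   }
+//   return CallKeyedLoadIC();  // Miss: fall back to the IC stub.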
@@ -5794,119 +5958,18 @@ void Reference::GetValue() {
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
- // Inline array load code if inside of a loop. We do not know
- // the receiver map yet, so we initially generate the code with
- // a check against an invalid map. In the inline cache code, we
- // patch the map check if appropriate.
- if (cgen_->loop_nesting() > 0) {
- Comment cmnt(masm, "[ Inlined load from keyed Property");
-
- Result key = cgen_->frame()->Pop();
- Result receiver = cgen_->frame()->Pop();
- key.ToRegister();
- receiver.ToRegister();
-
- // Use a fresh temporary to load the elements without destroying
- // the receiver which is needed for the deferred slow case.
- Result elements = cgen_->allocator()->Allocate();
- ASSERT(elements.is_valid());
-
- // Use a fresh temporary for the index and later the loaded
- // value.
- Result index = cgen_->allocator()->Allocate();
- ASSERT(index.is_valid());
-
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(index.reg(),
- receiver.reg(),
- key.reg(),
- is_global);
-
- // Check that the receiver is not a smi (only needed if this
- // is not a load from the global context) and that it has the
- // expected map.
- if (!is_global) {
- __ JumpIfSmi(receiver.reg(), deferred->entry_label());
- }
-
- // Initially, use an invalid map. The map is patched in the IC
- // initialization code.
- __ bind(deferred->patch_site());
- // Use masm-> here instead of the double underscore macro since extra
- // coverage code can interfere with the patching.
- masm->movq(kScratchRegister, Factory::null_value(),
- RelocInfo::EMBEDDED_OBJECT);
- masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- deferred->Branch(not_equal);
-
- // Check that the key is a non-negative smi.
- __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
-
- // Get the elements array from the receiver and check that it
- // is not a dictionary.
- __ movq(elements.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
- Factory::fixed_array_map());
- deferred->Branch(not_equal);
-
- // Shift the key to get the actual index value and check that
- // it is within bounds.
- __ SmiToInteger32(index.reg(), key.reg());
- __ cmpl(index.reg(),
- FieldOperand(elements.reg(), FixedArray::kLengthOffset));
- deferred->Branch(above_equal);
-
- // The index register holds the un-smi-tagged key. It has been
- // zero-extended to 64-bits, so it can be used directly as index in the
- // operand below.
- // Load and check that the result is not the hole. We could
- // reuse the index or elements register for the value.
- //
- // TODO(206): Consider whether it makes sense to try some
- // heuristic about which register to reuse. For example, if
- // one is rax, the we can reuse that one because the value
- // coming from the deferred code will be in rax.
- Result value = index;
- __ movq(value.reg(),
- Operand(elements.reg(),
- index.reg(),
- times_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
- elements.Unuse();
- index.Unuse();
- __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
- deferred->Branch(equal);
- __ IncrementCounter(&Counters::keyed_load_inline, 1);
-
- deferred->BindExit();
- // Restore the receiver and key to the frame and push the
- // result on top of it.
- cgen_->frame()->Push(&receiver);
- cgen_->frame()->Push(&key);
- cgen_->frame()->Push(&value);
-
- } else {
- Comment cmnt(masm, "[ Load from keyed Property");
- RelocInfo::Mode mode = is_global
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed load. The explicit nop instruction is here because
- // the push that follows might be peep-hole optimized away.
- __ nop();
- cgen_->frame()->Push(&answer);
- }
+ Result value = cgen_->EmitKeyedLoad(is_global);
+ cgen_->frame()->Push(&value);
break;
}
default:
UNREACHABLE();
}
+
+ if (!persist_after_get_) {
+ cgen_->UnloadReference(this);
+ }
}
@@ -5943,6 +6006,9 @@ void Reference::TakeValue() {
ASSERT(slot->type() == Slot::LOCAL);
cgen_->frame()->TakeLocalAt(slot->index());
}
+
+ ASSERT(persist_after_get_);
+ // Do not unload the reference, because it is used in SetValue.
}
@@ -5956,6 +6022,7 @@ void Reference::SetValue(InitState init_state) {
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
cgen_->StoreToSlot(slot, init_state);
+ cgen_->UnloadReference(this);
break;
}
@@ -5964,6 +6031,7 @@ void Reference::SetValue(InitState init_state) {
cgen_->frame()->Push(GetName());
Result answer = cgen_->frame()->CallStoreIC();
cgen_->frame()->Push(&answer);
+ set_unloaded();
break;
}
@@ -6065,6 +6133,7 @@ void Reference::SetValue(InitState init_state) {
masm->nop();
cgen_->frame()->Push(&answer);
}
+ cgen_->UnloadReference(this);
break;
}
@@ -6212,19 +6281,17 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
- // TODO(X64): This method is identical to the ia32 version.
- // Either find a reason to change it, or move it somewhere where it can be
- // shared. (Notice: It assumes that a Smi can fit in an int).
-
Object* answer_object = Heap::undefined_value();
switch (op) {
case Token::ADD:
- if (Smi::IsValid(left + right)) {
+ // Use intptr_t to detect overflow of 32-bit int.
+ if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
answer_object = Smi::FromInt(left + right);
}
break;
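+ // (For example, left == right == 0x40000000 gives an exact sum of
+ // 2^31, which would wrap in 32-bit int arithmetic but is representable
+ // in an intptr_t, so Smi::IsValid correctly rejects it.)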
case Token::SUB:
- if (Smi::IsValid(left - right)) {
+ // Use intptr_t to detect overflow of 32-bit int.
+ if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
answer_object = Smi::FromInt(left - right);
}
break;
@@ -6298,56 +6365,573 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
// End of CodeGenerator implementation.
+// Get the integer part of a heap number. Surprisingly, all this bit twiddling
+// is faster than using the built-in instructions on floating point registers.
+// Trashes rdi and rbx. The result is returned in rcx. The source register
+// cannot be rcx or one of the trashed registers.
+void IntegerConvert(MacroAssembler* masm,
+ Register source,
+ bool use_sse3,
+ Label* conversion_failure) {
+ ASSERT(!source.is(rcx) && !source.is(rdi) && !source.is(rbx));
+ Label done, right_exponent, normal_exponent;
+ Register scratch = rbx;
+ Register scratch2 = rdi;
+ // Get exponent word.
+ __ movl(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ __ movl(scratch2, scratch);
+ __ and_(scratch2, Immediate(HeapNumber::kExponentMask));
+ if (use_sse3) {
+ CpuFeatures::Scope scope(SSE3);
+ // Check whether the exponent is too big for a 64 bit signed integer.
+ static const uint32_t kTooBigExponent =
+ (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ __ cmpl(scratch2, Immediate(kTooBigExponent));
+ __ j(greater_equal, conversion_failure);
+ // Load x87 register with heap number.
+ __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
+ // Reserve space for 64 bit answer.
+ __ subq(rsp, Immediate(sizeof(uint64_t))); // Nolint.
+ // Do conversion, which cannot fail because we checked the exponent.
+ __ fisttp_d(Operand(rsp, 0));
+ __ movl(rcx, Operand(rsp, 0)); // Load low word of answer into rcx.
+ __ addq(rsp, Immediate(sizeof(uint64_t))); // Nolint.
+ } else {
+ // Load rcx with zero. We use this either for the final shift or
+ // for the answer.
+ __ xor_(rcx, rcx);
+ // Check whether the exponent matches a 32 bit signed int that cannot be
+ // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
+ // exponent is 30 (biased). This is the exponent we are fastest at, and
+ // also the highest exponent we can handle here.
+ const uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ cmpl(scratch2, Immediate(non_smi_exponent));
+ // If we have a match of the int32-but-not-Smi exponent then skip some
+ // logic.
+ __ j(equal, &right_exponent);
+ // If the exponent is higher than that then go to slow case. This catches
+ // numbers that don't fit in a signed int32, infinities and NaNs.
+ __ j(less, &normal_exponent);
+
+ {
+ // Handle a big exponent. The only reason we have this code is that the
+ // >>> operator has a tendency to generate numbers with an exponent of 31.
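+ // (For example, -1 >>> 0 evaluates to 4294967295, i.e. 1.xxx * 2^31.)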
+ const uint32_t big_non_smi_exponent =
+ (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+ __ cmpl(scratch2, Immediate(big_non_smi_exponent));
+ __ j(not_equal, conversion_failure);
+ // We have the big exponent, typically from >>>. This means the number is
+ // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
+ __ movl(scratch2, scratch);
+ __ and_(scratch2, Immediate(HeapNumber::kMantissaMask));
+ // Put back the implicit 1.
+ __ or_(scratch2, Immediate(1 << HeapNumber::kExponentShift));
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We just orred in the implicit bit so that took care of one and
+ // we want to use the full unsigned range so we subtract 1 bit from the
+ // shift distance.
+ const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ __ shl(scratch2, Immediate(big_shift_distance));
+ // Get the second half of the double.
+ __ movl(rcx, FieldOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 21 bits to get the most significant 11 bits of the low
+ // mantissa word.
+ __ shr(rcx, Immediate(32 - big_shift_distance));
+ __ or_(rcx, scratch2);
+ // We have the answer in rcx, but we may need to negate it.
+ __ testl(scratch, scratch);
+ __ j(positive, &done);
+ __ neg(rcx);
+ __ jmp(&done);
+ }
+
+ __ bind(&normal_exponent);
+ // Exponent word in scratch, exponent part of exponent word in scratch2.
+ // Zero in rcx.
+ // We know the exponent is smaller than 30 (biased). If it is less than
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
+ // it rounds to zero.
+ const uint32_t zero_exponent =
+ (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+ __ subl(scratch2, Immediate(zero_exponent));
+ // rcx already has a Smi zero.
+ __ j(less, &done);
+
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ __ shr(scratch2, Immediate(HeapNumber::kExponentShift));
+ __ movl(rcx, Immediate(30));
+ __ subl(rcx, scratch2);
+
+ __ bind(&right_exponent);
+ // Here rcx is the shift, scratch is the exponent word.
+ // Get the top bits of the mantissa.
+ __ and_(scratch, Immediate(HeapNumber::kMantissaMask));
+ // Put back the implicit 1.
+ __ or_(scratch, Immediate(1 << HeapNumber::kExponentShift));
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We have kExponentShift + 1 significant bits in the low end of the
+ // word. Shift them to the top bits.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ shl(scratch, Immediate(shift_distance));
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ __ movl(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 22 bits to get the most significant 10 bits of the low
+ // mantissa word.
+ __ shr(scratch2, Immediate(32 - shift_distance));
+ __ or_(scratch2, scratch);
+ // Move down according to the exponent.
+ __ shr_cl(scratch2);
+ // Now the unsigned answer is in scratch2. We need to move it to rcx and
+ // we may need to fix the sign.
+ Label negative;
+ __ xor_(rcx, rcx);
+ __ cmpl(rcx, FieldOperand(source, HeapNumber::kExponentOffset));
+ __ j(greater, &negative);
+ __ movl(rcx, scratch2);
+ __ jmp(&done);
+ __ bind(&negative);
+ __ subl(rcx, scratch2);
+ __ bind(&done);
+ }
+}
+
+
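The bit manipulation above is easier to check in plain C++. Below is a minimal sketch of what IntegerConvert computes for the exponent range it accepts; DoubleToInt32 is a hypothetical helper written for this illustration, not code from the patch:

    #include <cstdint>
    #include <cstring>

    // Truncate a finite double to int32 using only integer operations on
    // its IEEE 754 bits, as the stub does. Returns false where the stub
    // jumps to conversion_failure (exponent too large, infinity, NaN).
    bool DoubleToInt32(double value, int32_t* out) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
      if (exponent < 0) {  // Magnitude below 1.0 * 2^0 rounds to zero.
        *out = 0;
        return true;
      }
      if (exponent > 31) return false;
      // Put back the implicit 1, then align the binary point at bit 0.
      uint64_t mantissa =
          (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
      uint32_t magnitude = static_cast<uint32_t>(mantissa >> (52 - exponent));
      // Apply the sign in unsigned arithmetic; the cast wraps values in the
      // >>> range 2^31..2^32-1 the way ECMA-262 ToInt32 requires.
      *out = static_cast<int32_t>((bits >> 63) ? 0u - magnitude : magnitude);
      return true;
    }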
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- ASSERT(op_ == Token::SUB);
+ Label slow, done;
+
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ JumpIfNotSmi(rax, &try_float);
+
+ // Enter the runtime system if the value of the smi is zero, so that
+ // negation can distinguish 0 from -0.
+ // Also enter it if the value of the smi is Smi::kMinValue.
+ __ SmiNeg(rax, rax, &done);
+
+ // Either zero or Smi::kMinValue, neither of which becomes a smi when
+ // negated.
+ __ SmiCompare(rax, Smi::FromInt(0));
+ __ j(not_equal, &slow);
+ __ Move(rax, Factory::minus_zero_value());
+ __ jmp(&done);
+
+ // Try floating point case.
+ __ bind(&try_float);
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &slow);
+ // Operand is a float, negate its value by flipping sign bit.
+ __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(kScratchRegister, Immediate(0x01));
+ __ shl(kScratchRegister, Immediate(63));
+ __ xor_(rdx, kScratchRegister); // Flip sign.
+ // rdx is value to store.
+ if (overwrite_) {
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
+ } else {
+ __ AllocateHeapNumber(rcx, rbx, &slow);
+ // rcx: allocated 'empty' number
+ __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+ __ movq(rax, rcx);
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &slow);
- Label slow;
- Label done;
- Label try_float;
- // Check whether the value is a smi.
- __ JumpIfNotSmi(rax, &try_float);
+ // Convert the heap number in rax to an untagged integer in rcx.
+ IntegerConvert(masm, rax, CpuFeatures::IsSupported(SSE3), &slow);
- // Enter runtime system if the value of the smi is zero
- // to make sure that we switch between 0 and -0.
- // Also enter it if the value of the smi is Smi::kMinValue.
- __ SmiNeg(rax, rax, &done);
+ // Do the bitwise operation. On x64 any int32 result fits in a smi.
+ __ not_(rcx);
+ // Tag the result as a smi and we're done.
+ ASSERT(kSmiTagSize == 1);
+ __ Integer32ToSmi(rax, rcx);
+ }
- // Either zero or Smi::kMinValue, neither of which become a smi when negated.
- __ SmiCompare(rax, Smi::FromInt(0));
- __ j(not_equal, &slow);
- __ Move(rax, Factory::minus_zero_value());
- __ jmp(&done);
+ // Return from the stub.
+ __ bind(&done);
+ __ StubReturn(1);
- // Enter runtime system.
+ // Handle the slow case by jumping to the JavaScript builtin.
__ bind(&slow);
__ pop(rcx); // pop return address
__ push(rax);
__ push(rcx); // push return address
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- __ jmp(&done);
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
- // Try floating point case.
- __ bind(&try_float);
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ Cmp(rdx, Factory::heap_number_map());
- __ j(not_equal, &slow);
- // Operand is a float, negate its value by flipping sign bit.
- __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(kScratchRegister, Immediate(0x01));
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(rdx, kScratchRegister); // Flip sign.
- // rdx is value to store.
- if (overwrite_) {
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
- } else {
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // rcx: allocated 'empty' number
- __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
- __ movq(rax, rcx);
+
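Two details of the SUB path above are worth spelling out. Negating a double never touches floating-point arithmetic: it is a single XOR of the sign bit, which is what the shl/xor pair builds. And the smi 0 must go to the runtime because its negation, -0, only exists as a heap number. A minimal C++ restatement of the sign flip (illustrative only):

    #include <cstdint>
    #include <cstring>

    // Flip bit 63 of the raw representation; exponent and mantissa are
    // untouched, so this is exact for every value, including 0 and NaN.
    double NegateDouble(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      bits ^= uint64_t{1} << 63;
      std::memcpy(&value, &bits, sizeof(bits));
      return value;
    }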
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+ // Just jump directly to runtime if native RegExp is not selected at
+ // compile time or if the regexp entry in generated code has been turned
+ // off by a runtime flag.
+#ifndef V8_NATIVE_REGEXP
+ __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+#else // V8_NATIVE_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+ return;
}
+ // Stack frame on entry.
+ //  rsp[0]: return address
+ //  rsp[8]: last_match_info (expected JSArray)
+ //  rsp[16]: previous index
+ //  rsp[24]: subject string
+ //  rsp[32]: JSRegExp object
+
+ static const int kLastMatchInfoOffset = 1 * kPointerSize;
+ static const int kPreviousIndexOffset = 2 * kPointerSize;
+ static const int kSubjectOffset = 3 * kPointerSize;
+ static const int kJSRegExpOffset = 4 * kPointerSize;
+
+ Label runtime;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size();
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
+ __ movq(kScratchRegister, Operand(kScratchRegister, 0));
+ __ testq(kScratchRegister, kScratchRegister);
+ __ j(zero, &runtime);
+
+ // Check that the first argument is a JSRegExp object.
+ __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ JumpIfSmi(rax, &runtime);
+ __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ Condition is_smi = masm->CheckSmi(rcx);
+ __ Check(NegateCondition(is_smi),
+ "Unexpected type for RegExp data, FixedArray expected");
+ __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister);
+ __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+ }
+
+ // rcx: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ movq(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
+ __ SmiCompare(rbx, Smi::FromInt(JSRegExp::IRREGEXP));
+ __ j(not_equal, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // Check that the number of captures fits in the static offsets vector buffer.
+ __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
+ __ addq(rdx, Immediate(2)); // rdx was number_of_captures * 2.
+ // Check that the static offsets vector buffer is large enough.
+ __ cmpq(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
+ __ j(above, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // rdx: Number of capture registers
+ // Check that the second argument is a string.
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ JumpIfSmi(rax, &runtime);
+ Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
+ __ j(NegateCondition(is_string), &runtime);
+ // Get the length of the string to rbx.
+ __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
+
+ // rbx: Length of subject string
+ // rcx: RegExp data (FixedArray)
+ // rdx: Number of capture registers
+ // Check that the third argument is a positive smi less than the string
+ // length. A negative value will be greater (unsigned comparison).
+ __ movq(rax, Operand(rsp, kPreviousIndexOffset));
+ __ SmiToInteger32(rax, rax);
+ __ cmpl(rax, rbx);
+ __ j(above, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // rdx: Number of capture registers
+ // Check that the fourth object is a JSArray object.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ JumpIfSmi(rax, &runtime);
+ __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.
+ __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
+ __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ Cmp(rax, Factory::fixed_array_map());
+ __ j(not_equal, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information. Ensure no overflow in add.
+ ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
+ __ movl(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmpl(rdx, rax);
+ __ j(greater, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // Check the representation and encoding of the subject string.
+ Label seq_string, seq_two_byte_string, check_code;
+ const int kStringRepresentationEncodingMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ andb(rbx, Immediate(kStringRepresentationEncodingMask));
+ // First check for sequential string.
+ ASSERT_EQ(0, kStringTag);
+ ASSERT_EQ(0, kSeqStringTag);
+ __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
+ __ j(zero, &seq_string);
+
+ // Check for flat cons string.
+ // A flat cons string is a cons string where the second part is the empty
+ // string. In that case the subject string is just the first part of the cons
+ // string. Also in this case the first part of the cons string is known to be
+ // a sequential string or an external string.
+ __ movl(rdx, rbx);
+ __ andb(rdx, Immediate(kStringRepresentationMask));
+ __ cmpb(rdx, Immediate(kConsStringTag));
+ __ j(not_equal, &runtime);
+ __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
+ __ Cmp(rdx, Factory::empty_string());
+ __ j(not_equal, &runtime);
+ __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ ASSERT_EQ(0, kSeqStringTag);
+ __ testb(rbx, Immediate(kStringRepresentationMask));
+ __ j(not_zero, &runtime);
+ __ andb(rbx, Immediate(kStringRepresentationEncodingMask));
+
+ __ bind(&seq_string);
+ // rax: subject string (sequential, either ascii or two byte)
+ // rbx: subject string type & kStringRepresentationEncodingMask
+ // rcx: RegExp data (FixedArray)
+ // Check that the irregexp code has been generated for an ascii string. If
+ // it has, the field contains a code object, otherwise it contains the hole.
+ __ cmpb(rbx, Immediate(kStringTag | kSeqStringTag | kTwoByteStringTag));
+ __ j(equal, &seq_two_byte_string);
+ if (FLAG_debug_code) {
+ __ cmpb(rbx, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+ __ Check(equal, "Expected sequential ascii string");
+ }
+ __ movq(r12, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
+ __ Set(rdi, 1); // Type is ascii.
+ __ jmp(&check_code);
+
+ __ bind(&seq_two_byte_string);
+ // rax: subject string
+ // rcx: RegExp data (FixedArray)
+ __ movq(r12, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
+ __ Set(rdi, 0); // Type is two byte.
+
+ __ bind(&check_code);
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object, otherwise it
+ // contains the hole.
+ __ CmpObjectType(r12, CODE_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+
+ // rax: subject string
+ // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r12: code
+ // Load used arguments before starting to push arguments for the call to
+ // native RegExp code, to avoid handling a changing stack height.
+ __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
+ __ SmiToInteger64(rbx, rbx); // Previous index from smi.
+
+ // rax: subject string
+ // rbx: previous index
+ // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r12: code
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(&Counters::regexp_entry_native, 1);
+
+ // rsi is callee save on Windows and is used to pass a parameter on Linux.
+ __ push(rsi);
+
+ static const int kRegExpExecuteArguments = 7;
+ __ PrepareCallCFunction(kRegExpExecuteArguments);
+ int argument_slots_on_stack =
+ masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
+
+ // Argument 7: Indicate that this is a direct call from JavaScript.
+ __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
+ Immediate(1));
+
+ // Argument 6: Start (high end) of backtracking stack memory area.
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
+ __ movq(r9, Operand(kScratchRegister, 0));
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
+ __ addq(r9, Operand(kScratchRegister, 0));
+ // Argument 6 passed in r9 on Linux and on the stack on Windows.
+#ifdef _WIN64
+ __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9);
+#endif
+
+ // Argument 5: static offsets vector buffer.
+ __ movq(r8, ExternalReference::address_of_static_offsets_vector());
+ // Argument 5 passed in r8 on Linux and on the stack on Windows.
+#ifdef _WIN64
+ __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8);
+#endif
+
+ // First four arguments are passed in registers on both Linux and Windows.
+#ifdef _WIN64
+ Register arg4 = r9;
+ Register arg3 = r8;
+ Register arg2 = rdx;
+ Register arg1 = rcx;
+#else
+ Register arg4 = rcx;
+ Register arg3 = rdx;
+ Register arg2 = rsi;
+ Register arg1 = rdi;
+#endif
+
+ // Keep track of aliasing between argX defined above and the registers used.
+ // rax: subject string
+ // rbx: previous index
+ // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r12: code
+
+ // Argument 4: End of string data
+ // Argument 3: Start of string data
+ Label setup_two_byte, setup_rest;
+ __ testb(rdi, rdi);
+ __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ j(zero, &setup_two_byte);
+ __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
+ __ jmp(&setup_rest);
+ __ bind(&setup_two_byte);
+ __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
+
+ __ bind(&setup_rest);
+ // Argument 2: Previous index.
+ __ movq(arg2, rbx);
+
+ // Argument 1: Subject string.
+ __ movq(arg1, rax);
+
+ // Locate the code entry and call it.
+ __ addq(r12, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ CallCFunction(r12, kRegExpExecuteArguments);
+
+ // rsi is caller save, as it is used to pass a parameter.
+ __ pop(rsi);
+
+ // Check the result.
+ Label success;
+ __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
+ __ j(equal, &success);
+ Label failure;
+ __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
+ __ j(equal, &failure);
+ __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
+ // If not an exception, it can only be a retry. Handle that in the runtime system.
+ __ j(not_equal, &runtime);
+ // The result must be an exception at this point. If there is no pending
+ // exception already, a stack overflow (on the backtrack stack) was
+ // detected in RegExp code but the exception has not been created yet.
+ // Handle that in the runtime system.
+ // TODO(592) Rerunning the RegExp to get the stack overflow exception.
+ ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ __ movq(kScratchRegister, pending_exception_address);
+ __ Cmp(kScratchRegister, Factory::the_hole_value());
+ __ j(equal, &runtime);
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ Move(rax, Factory::null_value());
+ __ ret(4 * kPointerSize);
+
+ // Load RegExp data.
+ __ bind(&success);
+ __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
+ __ addq(rdx, Immediate(2)); // rdx was number_of_captures * 2.
+
+ // rdx: Number of capture registers
+ // Load last_match_info which is still known to be a fast case JSArray.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
+
+ // rbx: last_match_info backing store (FixedArray)
+ // rdx: number of capture registers
+ // Store the capture count.
+ __ Integer32ToSmi(kScratchRegister, rdx);
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
+ kScratchRegister);
+ // Store last subject and last input.
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
+ __ movq(rcx, rbx);
+ __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
+ __ movq(rcx, rbx);
+ __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
+
+ // Get the static offsets vector filled by the native regexp code.
+ __ movq(rcx, ExternalReference::address_of_static_offsets_vector());
+
+ // rbx: last_match_info backing store (FixedArray)
+ // rcx: offsets vector
+ // rdx: number of capture registers
+ Label next_capture, done;
+ __ movq(rax, Operand(rsp, kPreviousIndexOffset));
+ // Capture register counter starts from the number of capture registers and
+ // counts down until wrapping after zero.
+ __ bind(&next_capture);
+ __ subq(rdx, Immediate(1));
+ __ j(negative, &done);
+ // Read the value from the static offsets vector buffer and make it a smi.
+ __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
+ __ Integer32ToSmi(rdi, rdi, &runtime);
+ // Add previous index (from its stack slot) if value is not negative.
+ Label capture_negative;
+ // Negative flag set by the smi conversion above.
+ __ j(negative, &capture_negative);
+ __ SmiAdd(rdi, rdi, rax, &runtime); // Add previous index.
+ __ bind(&capture_negative);
+ // Store the smi value in the last match info.
+ __ movq(FieldOperand(rbx,
+ rdx,
+ times_pointer_size,
+ RegExpImpl::kFirstCaptureOffset),
+ rdi);
+ __ jmp(&next_capture);
__ bind(&done);
- __ StubReturn(1);
+
+ // Return last match info.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ ret(4 * kPointerSize);
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+#endif // V8_NATIVE_REGEXP
}
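The capture-register arithmetic appears twice in this stub, once for the offsets-vector size check and once when filling in last_match_info. Stated as a one-line helper (a sketch of the invariant, not code from the patch):

    // Every capture group, plus the implicit capture 0 for the whole match,
    // takes a (start, end) pair of slots, hence "times two, plus two".
    inline int CaptureRegisterCount(int number_of_captures) {
      return (number_of_captures + 1) * 2;
    }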
@@ -6496,9 +7080,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Fast negative check for symbol-to-symbol equality.
__ bind(&check_for_symbols);
+ Label check_for_strings;
if (cc_ == equal) {
- BranchIfNonSymbol(masm, &call_builtin, rax, kScratchRegister);
- BranchIfNonSymbol(masm, &call_builtin, rdx, kScratchRegister);
+ BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
+ BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
// We've already checked for object identity, so if both operands
// are symbols they aren't equal. Register eax (not rax) already holds a
@@ -6506,6 +7091,23 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ ret(2 * kPointerSize);
}
+ __ bind(&check_for_strings);
+
+ __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &call_builtin);
+
+ // Inline comparison of ascii strings.
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ rdx,
+ rax,
+ rcx,
+ rbx,
+ rdi,
+ r8);
+
+#ifdef DEBUG
+ __ Abort("Unexpected fall-through from string comparison");
+#endif
+
__ bind(&call_builtin);
// must swap argument order
__ pop(rcx);
@@ -6558,6 +7160,7 @@ void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
// Call the function just below TOS on the stack with the given
// arguments. The receiver is the TOS.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+ CallFunctionFlags flags,
int position) {
// Push the arguments ("left-to-right") on the stack.
int arg_count = args->length();
@@ -6570,7 +7173,7 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
// Use the shared code stub to call the function.
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
+ CallFunctionStub call_function(arg_count, in_loop, flags);
Result answer = frame_->CallStub(&call_function, arg_count + 1);
// Restore context and replace function on the stack with the
// result of the stub invocation.
@@ -6741,23 +7344,6 @@ void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
}
-int CEntryStub::MinorKey() {
- ASSERT(result_size_ <= 2);
-#ifdef _WIN64
- // Simple results returned in rax (using default code).
- // Complex results must be written to address passed as first argument.
- // Use even numbers for minor keys, reserving the odd numbers for
- // CEntryDebugBreakStub.
- return (result_size_ < 2) ? 0 : result_size_ * 2;
-#else
- // Single results returned in rax (both AMD64 and Win64 calling conventions)
- // and a struct of two pointers in rax+rdx (AMD64 calling convention only)
- // by default.
- return 0;
-#endif
-}
-
-
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// Check that stack should contain next handler, frame pointer, state and
// return address in that order.
@@ -6791,7 +7377,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- ExitFrame::Mode mode,
bool do_gc,
bool always_allocate_scope) {
// rax: result parameter for PerformGC, if any.
@@ -6803,6 +7388,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// This pointer is reused in LeaveExitFrame(), so it is stored in a
// callee-saved register.
+ // Simple results returned in rax (both AMD64 and Win64 calling conventions).
+ // Complex results must be written to the address passed as the first
+ // argument.
+ // AMD64 calling convention: a struct of two pointers in rax+rdx
+
if (do_gc) {
// Pass failure code returned from last attempt as first argument to GC.
#ifdef _WIN64
@@ -6874,7 +7463,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(zero, &failure_returned);
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(mode, result_size_);
+ __ LeaveExitFrame(mode_, result_size_);
__ ret(0);
// Handling of failure.
@@ -6968,6 +7557,32 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow;
+ // If the receiver might be a value (string, number or boolean), check for
+ // this and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // +1 ~ return address
+ Label receiver_is_value, receiver_is_js_object;
+ __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ JumpIfSmi(rax, &receiver_is_value);
+
+ // Check if the receiver is a valid JS object.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
+ __ j(above_equal, &receiver_is_js_object);
+
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
+
+ __ bind(&receiver_is_js_object);
+ }
+
// Get the function to call from the stack.
// +2 ~ receiver, return address
__ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
@@ -6992,7 +7607,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
}
-void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+void CEntryStub::Generate(MacroAssembler* masm) {
// rax: number of arguments including receiver
// rbx: pointer to C function (C callee-saved)
// rbp: frame pointer of calling JS frame (restored after C call)
@@ -7004,12 +7619,8 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// this by performing a garbage collection and retrying the
// builtin once.
- ExitFrame::Mode mode = is_debug_break ?
- ExitFrame::MODE_DEBUG :
- ExitFrame::MODE_NORMAL;
-
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode, result_size_);
+ __ EnterExitFrame(mode_, result_size_);
// rax: Holds the context at this point, but should not be used.
// On entry to code generated by GenerateCore, it must hold
@@ -7032,7 +7643,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
false,
false);
@@ -7041,7 +7651,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
true,
false);
@@ -7052,7 +7661,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
true,
true);
@@ -7244,48 +7852,75 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
XMMRegister dst1,
XMMRegister dst2) {
- __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
+ __ movq(kScratchRegister, rdx);
LoadFloatOperand(masm, kScratchRegister, dst1);
- __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movq(kScratchRegister, rax);
LoadFloatOperand(masm, kScratchRegister, dst2);
}
-void FloatingPointHelper::LoadInt32Operand(MacroAssembler* masm,
- const Operand& src,
- Register dst) {
- // TODO(X64): Convert number operands to int32 values.
- // Don't convert a Smi to a double first.
- UNIMPLEMENTED();
-}
-
-
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
- Label load_smi_1, load_smi_2, done_load_1, done;
- __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
- __ JumpIfSmi(kScratchRegister, &load_smi_1);
- __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
- __ bind(&done_load_1);
-
- __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
- __ JumpIfSmi(kScratchRegister, &load_smi_2);
- __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
+void FloatingPointHelper::LoadFloatOperandsFromSmis(MacroAssembler* masm,
+ XMMRegister dst1,
+ XMMRegister dst2) {
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(dst1, kScratchRegister);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(dst2, kScratchRegister);
+}
+
+
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* conversion_failure) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ __ JumpIfNotSmi(rdx, &arg1_is_object);
+ __ SmiToInteger32(rdx, rdx);
+ __ jmp(&load_arg2);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg1);
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ movl(rdx, Immediate(0));
+ __ jmp(&load_arg2);
+
+ __ bind(&arg1_is_object);
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &check_undefined_arg1);
+ // Get the untagged integer version of the rdx heap number in rcx.
+ IntegerConvert(masm, rdx, use_sse3, conversion_failure);
+ __ movl(rdx, rcx);
+
+ // Here rdx has the untagged integer, rax has a Smi or a heap number.
+ __ bind(&load_arg2);
+ // Test if arg2 is a Smi.
+ __ JumpIfNotSmi(rax, &arg2_is_object);
+ __ SmiToInteger32(rax, rax);
+ __ movl(rcx, rax);
__ jmp(&done);
- __ bind(&load_smi_1);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ push(kScratchRegister);
- __ fild_s(Operand(rsp, 0));
- __ pop(kScratchRegister);
- __ jmp(&done_load_1);
-
- __ bind(&load_smi_2);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ push(kScratchRegister);
- __ fild_s(Operand(rsp, 0));
- __ pop(kScratchRegister);
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg2);
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ movl(rcx, Immediate(0));
+ __ jmp(&done);
+ __ bind(&arg2_is_object);
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &check_undefined_arg2);
+ // Get the untagged integer version of the rax heap number in rcx.
+ IntegerConvert(masm, rax, use_sse3, conversion_failure);
__ bind(&done);
+ __ movl(rax, rdx);
}
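Read end to end, LoadAsIntegers applies one conversion contract to each operand. The sketch below restates it; the Value struct is a stand-in invented for this illustration, and DoubleToInt32 is the hypothetical helper sketched after IntegerConvert above:

    #include <cstdint>

    struct Value {  // Illustrative tagged value, not V8's representation.
      enum Kind { SMI, UNDEFINED, HEAP_NUMBER, OTHER } kind;
      int32_t smi;
      double number;
    };

    bool LoadAsInt32(const Value& v, int32_t* out) {
      switch (v.kind) {
        case Value::SMI:         *out = v.smi; return true;
        case Value::UNDEFINED:   *out = 0;     return true;  // ECMA-262, 9.5.
        case Value::HEAP_NUMBER: return DoubleToInt32(v.number, out);
        default:                 return false;  // Conversion failure.
      }
    }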
@@ -7484,94 +8119,188 @@ void GenericBinaryOpStub::GenerateCall(
}
+Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
+ VirtualFrame* frame,
+ Result* left,
+ Result* right) {
+ if (ArgsInRegistersSupported()) {
+ SetArgsInRegisters();
+ return frame->CallStub(this, left, right);
+ } else {
+ frame->Push(left);
+ frame->Push(right);
+ return frame->CallStub(this, 2);
+ }
+}
+
+
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // Perform fast-case smi code for the operation (rax <op> rbx) and
- // leave result in register rax.
+ // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
+ // dividend in rax and rdx free for the division. Use rax, rbx for those.
+ Comment load_comment(masm, "-- Load arguments");
+ Register left = rdx;
+ Register right = rax;
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ left = rax;
+ right = rbx;
+ if (HasArgsInRegisters()) {
+ __ movq(rbx, rax);
+ __ movq(rax, rdx);
+ }
+ }
+ if (!HasArgsInRegisters()) {
+ __ movq(right, Operand(rsp, 1 * kPointerSize));
+ __ movq(left, Operand(rsp, 2 * kPointerSize));
+ }
- // Smi check both operands.
- __ JumpIfNotBothSmi(rax, rbx, slow);
+ // 2. Smi check both operands. Skip the check for OR as it is better combined
+ // with the actual operation.
+ Label not_smis;
+ if (op_ != Token::BIT_OR) {
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ __ JumpIfNotBothSmi(left, right, &not_smis);
+ }
+ // 3. Operands are both smis (except for OR), perform the operation leaving
+ // the result in rax and check the result if necessary.
+ Comment perform_smi(masm, "-- Perform smi operation");
+ Label use_fp_on_smis;
switch (op_) {
case Token::ADD: {
- __ SmiAdd(rax, rax, rbx, slow);
+ ASSERT(right.is(rax));
+ __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
break;
}
case Token::SUB: {
- __ SmiSub(rax, rax, rbx, slow);
+ __ SmiSub(left, left, right, &use_fp_on_smis);
+ __ movq(rax, left);
break;
}
case Token::MUL:
- __ SmiMul(rax, rax, rbx, slow);
+ ASSERT(right.is(rax));
+ __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
break;
case Token::DIV:
- __ SmiDiv(rax, rax, rbx, slow);
+ ASSERT(left.is(rax));
+ __ SmiDiv(left, left, right, &use_fp_on_smis);
break;
case Token::MOD:
- __ SmiMod(rax, rax, rbx, slow);
+ ASSERT(left.is(rax));
+ __ SmiMod(left, left, right, slow);
break;
case Token::BIT_OR:
- __ SmiOr(rax, rax, rbx);
+ ASSERT(right.is(rax));
+ __ movq(rcx, right); // Save the right operand.
+ __ SmiOr(right, right, left); // BIT_OR is commutative.
+ __ testb(right, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smis);
break;
case Token::BIT_AND:
- __ SmiAnd(rax, rax, rbx);
+ ASSERT(right.is(rax));
+ __ SmiAnd(right, right, left); // BIT_AND is commutative.
break;
case Token::BIT_XOR:
- __ SmiXor(rax, rax, rbx);
+ ASSERT(right.is(rax));
+ __ SmiXor(right, right, left); // BIT_XOR is commutative.
break;
case Token::SHL:
case Token::SHR:
case Token::SAR:
- // Move the second operand into register ecx.
- __ movq(rcx, rbx);
- // Perform the operation.
switch (op_) {
case Token::SAR:
- __ SmiShiftArithmeticRight(rax, rax, rcx);
+ __ SmiShiftArithmeticRight(left, left, right);
break;
case Token::SHR:
- __ SmiShiftLogicalRight(rax, rax, rcx, slow);
+ __ SmiShiftLogicalRight(left, left, right, slow);
break;
case Token::SHL:
- __ SmiShiftLeft(rax, rax, rcx, slow);
+ __ SmiShiftLeft(left, left, right, slow);
break;
default:
UNREACHABLE();
}
+ __ movq(rax, left);
break;
default:
UNREACHABLE();
break;
}
+
+ // 4. Emit return of result in rax.
+ GenerateReturn(masm);
+
+ // 5. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ __ bind(&use_fp_on_smis);
+ if (op_ == Token::DIV) {
+ __ movq(rdx, rax);
+ __ movq(rax, rbx);
+ }
+ // left is rdx, right is rax.
+ __ AllocateHeapNumber(rbx, rcx, slow);
+ FloatingPointHelper::LoadFloatOperandsFromSmis(masm, xmm4, xmm5);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm4, xmm5); break;
+ case Token::SUB: __ subsd(xmm4, xmm5); break;
+ case Token::MUL: __ mulsd(xmm4, xmm5); break;
+ case Token::DIV: __ divsd(xmm4, xmm5); break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm4);
+ __ movq(rax, rbx);
+ GenerateReturn(masm);
+ }
+ default:
+ break;
+ }
+
+ // 6. Non-smi operands, fall out to the non-smi code with the operands in
+ // rdx and rax.
+ Comment done_comment(masm, "-- Enter non-smi code");
+ __ bind(&not_smis);
+
+ switch (op_) {
+ case Token::DIV:
+ case Token::MOD:
+ // Operands are in rax, rbx at this point.
+ __ movq(rdx, rax);
+ __ movq(rax, rbx);
+ break;
+
+ case Token::BIT_OR:
+ // Right operand is saved in rcx and rax was destroyed by the smi
+ // operation.
+ __ movq(rax, rcx);
+ break;
+
+ default:
+ break;
+ }
}
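The value of this fast path is that smi arithmetic runs directly on tagged words. As a toy model (deliberately not V8's actual x64 tagging scheme): with a zero tag bit in the low position, addition of two tagged values is ordinary addition, and signed overflow is exactly the condition that sends the stub to use_fp_on_smis:

    #include <cstdint>

    // Toy one-bit-tag scheme; tag 0 marks a small integer, so tagged
    // addition preserves the tag. __builtin_add_overflow (GCC/Clang)
    // stands in for the stub's overflow check.
    bool TryTaggedAdd(intptr_t tagged_left, intptr_t tagged_right,
                      intptr_t* tagged_result) {
      return !__builtin_add_overflow(tagged_left, tagged_right, tagged_result);
    }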
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label call_runtime;
if (HasSmiCodeInStub()) {
- // The fast case smi code wasn't inlined in the stub caller
- // code. Generate it here to speed up common operations.
- Label slow;
- __ movq(rbx, Operand(rsp, 1 * kPointerSize)); // get y
- __ movq(rax, Operand(rsp, 2 * kPointerSize)); // get x
- GenerateSmiCode(masm, &slow);
- GenerateReturn(masm);
-
- // Too bad. The fast case smi code didn't succeed.
- __ bind(&slow);
+ GenerateSmiCode(masm, &call_runtime);
+ } else if (op_ != Token::MOD) {
+ GenerateLoadArguments(masm);
}
-
- // Make sure the arguments are in rdx and rax.
- GenerateLoadArguments(masm);
-
// Floating point case.
switch (op_) {
case Token::ADD:
@@ -7582,12 +8311,34 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// rdx: x
FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
// Fast-case: Both operands are numbers.
+ // xmm4 and xmm5 are volatile XMM registers.
+ FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm4, xmm5); break;
+ case Token::SUB: __ subsd(xmm4, xmm5); break;
+ case Token::MUL: __ mulsd(xmm4, xmm5); break;
+ case Token::DIV: __ divsd(xmm4, xmm5); break;
+ default: UNREACHABLE();
+ }
// Allocate a heap number, if needed.
Label skip_allocation;
- switch (mode_) {
+ OverwriteMode mode = mode_;
+ if (HasArgsReversed()) {
+ if (mode == OVERWRITE_RIGHT) {
+ mode = OVERWRITE_LEFT;
+ } else if (mode == OVERWRITE_LEFT) {
+ mode = OVERWRITE_RIGHT;
+ }
+ }
+ switch (mode) {
case OVERWRITE_LEFT:
+ __ JumpIfNotSmi(rdx, &skip_allocation);
+ __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+ __ movq(rdx, rbx);
+ __ bind(&skip_allocation);
__ movq(rax, rdx);
- // Fall through!
+ break;
case OVERWRITE_RIGHT:
// If the argument in rax is already an object, we skip the
// allocation of a heap number.
@@ -7602,16 +8353,6 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
break;
default: UNREACHABLE();
}
- // xmm4 and xmm5 are volatile XMM registers.
- FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm4, xmm5); break;
- case Token::SUB: __ subsd(xmm4, xmm5); break;
- case Token::MUL: __ mulsd(xmm4, xmm5); break;
- case Token::DIV: __ divsd(xmm4, xmm5); break;
- default: UNREACHABLE();
- }
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
GenerateReturn(masm);
}
@@ -7625,44 +8366,8 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::SAR:
case Token::SHL:
case Token::SHR: {
- FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
- // TODO(X64): Don't convert a Smi to float and then back to int32
- // afterwards.
- FloatingPointHelper::LoadFloatOperands(masm);
-
- Label skip_allocation, non_smi_result, operand_conversion_failure;
-
- // Reserve space for converted numbers.
- __ subq(rsp, Immediate(2 * kPointerSize));
-
- if (use_sse3_) {
- // Truncate the operands to 32-bit integers and check for
- // exceptions in doing so.
- CpuFeatures::Scope scope(SSE3);
- __ fisttp_s(Operand(rsp, 0 * kPointerSize));
- __ fisttp_s(Operand(rsp, 1 * kPointerSize));
- __ fnstsw_ax();
- __ testl(rax, Immediate(1));
- __ j(not_zero, &operand_conversion_failure);
- } else {
- // Check if right operand is int32.
- __ fist_s(Operand(rsp, 0 * kPointerSize));
- __ fild_s(Operand(rsp, 0 * kPointerSize));
- __ FCmp();
- __ j(not_zero, &operand_conversion_failure);
- __ j(parity_even, &operand_conversion_failure);
-
- // Check if left operand is int32.
- __ fist_s(Operand(rsp, 1 * kPointerSize));
- __ fild_s(Operand(rsp, 1 * kPointerSize));
- __ FCmp();
- __ j(not_zero, &operand_conversion_failure);
- __ j(parity_even, &operand_conversion_failure);
- }
-
- // Get int32 operands and perform bitop.
- __ pop(rcx);
- __ pop(rax);
+ Label skip_allocation, non_smi_result;
+ FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
switch (op_) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
@@ -7710,28 +8415,10 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
GenerateReturn(masm);
}
- // Clear the FPU exception flag and reset the stack before calling
- // the runtime system.
- __ bind(&operand_conversion_failure);
- __ addq(rsp, Immediate(2 * kPointerSize));
- if (use_sse3_) {
- // If we've used the SSE3 instructions for truncating the
- // floating point values to integers and it failed, we have a
- // pending #IA exception. Clear it.
- __ fnclex();
- } else {
- // The non-SSE3 variant does early bailout if the right
- // operand isn't a 32-bit integer, so we may have a single
- // value on the FPU stack we need to get rid of.
- __ ffree(0);
- }
-
// SHR should return uint32 - go to runtime for non-smi/negative result.
if (op_ == Token::SHR) {
__ bind(&non_smi_result);
}
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
break;
}
default: UNREACHABLE(); break;
@@ -7741,9 +8428,9 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// result. If arguments was passed in registers now place them on the
// stack in the correct order below the return address.
__ bind(&call_runtime);
- if (HasArgumentsInRegisters()) {
+ if (HasArgsInRegisters()) {
__ pop(rcx);
- if (HasArgumentsReversed()) {
+ if (HasArgsReversed()) {
__ push(rax);
__ push(rdx);
} else {
@@ -7758,8 +8445,6 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label not_strings, both_strings, not_string1, string1;
Condition is_smi;
Result answer;
- __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // First argument.
- __ movq(rax, Operand(rsp, 1 * kPointerSize)); // Second argument.
is_smi = masm->CheckSmi(rdx);
__ j(is_smi, &not_string1);
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rdx);
@@ -7777,7 +8462,11 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// Only first argument is a string.
__ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+ __ InvokeBuiltin(
+ HasArgsReversed() ?
+ Builtins::STRING_ADD_RIGHT :
+ Builtins::STRING_ADD_LEFT,
+ JUMP_FUNCTION);
// First argument was not a string, test second.
__ bind(&not_string1);
@@ -7787,7 +8476,11 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ j(above_equal, &not_strings);
// Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+ __ InvokeBuiltin(
+ HasArgsReversed() ?
+ Builtins::STRING_ADD_LEFT :
+ Builtins::STRING_ADD_RIGHT,
+ JUMP_FUNCTION);
__ bind(&not_strings);
// Neither argument is a string.
@@ -7799,7 +8492,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
break;
case Token::MUL:
__ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
+ break;
case Token::DIV:
__ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
@@ -7832,7 +8525,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
// If arguments are not passed in registers read them from the stack.
- if (!HasArgumentsInRegisters()) {
+ if (!HasArgsInRegisters()) {
__ movq(rax, Operand(rsp, 1 * kPointerSize));
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
}
@@ -7842,7 +8535,7 @@ void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
// If arguments are not passed in registers remove them from the stack before
// returning.
- if (!HasArgumentsInRegisters()) {
+ if (!HasArgsInRegisters()) {
__ ret(2 * kPointerSize); // Remove both operands
} else {
__ ret(0);
@@ -7945,8 +8638,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Both strings are non-empty.
// rax: first string
// rbx: length of first string
- // ecx: length of second string
- // edx: second string
+ // rcx: length of second string
+ // rdx: second string
// r8: instance type of first string if string check was performed above
// r9: instance type of first string if string check was performed above
Label string_add_flat_result;
@@ -8102,11 +8795,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
}
-void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
+void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii) {
Label loop;
__ bind(&loop);
// This loop just copies one character at a time, as it is only used for very
@@ -8127,6 +8820,294 @@ void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
}
+void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii) {
+ // Copy characters using rep movs of quadwords. Copy remaining characters
+ // after running rep movs.
+ ASSERT(dest.is(rdi)); // rep movs destination
+ ASSERT(src.is(rsi)); // rep movs source
+ ASSERT(count.is(rcx)); // rep movs count
+
+ // Nothing to do for zero characters.
+ Label done;
+ __ testq(count, count);
+ __ j(zero, &done);
+
+ // Make count the number of bytes to copy.
+ if (!ascii) {
+ ASSERT_EQ(2, sizeof(uc16)); // NOLINT
+ __ addq(count, count);
+ }
+
+ // Don't enter the rep movs if there are fewer than 8 bytes to copy.
+ Label last_bytes;
+ __ testq(count, Immediate(~7));
+ __ j(zero, &last_bytes);
+
+ // Copy from rsi to rdi using the rep movs instruction.
+ __ movq(kScratchRegister, count);
+ __ sar(count, Immediate(3)); // Number of quadwords to copy.
+ __ repmovsq();
+
+ // Find number of bytes left.
+ __ movq(count, kScratchRegister);
+ __ and_(count, Immediate(7));
+
+ // Check if there are more bytes to copy.
+ __ bind(&last_bytes);
+ __ testq(count, count);
+ __ j(zero, &done);
+
+ // Copy remaining characters.
+ Label loop;
+ __ bind(&loop);
+ __ movb(kScratchRegister, Operand(src, 0));
+ __ movb(Operand(dest, 0), kScratchRegister);
+ __ addq(src, Immediate(1));
+ __ addq(dest, Immediate(1));
+ __ subq(count, Immediate(1));
+ __ j(not_zero, &loop);
+
+ __ bind(&done);
+}
+
+
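A plain C++ restatement of the copy strategy: bulk-move the bytes covered by whole quadwords, then finish the 0..7 leftovers one at a time. memcpy stands in for rep movsq here; this is an illustration, not the stub's code:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    void CopyBytesRep(uint8_t* dest, const uint8_t* src, size_t count) {
      size_t chunk_bytes = count & ~static_cast<size_t>(7);
      std::memcpy(dest, src, chunk_bytes);          // Models rep movsq.
      for (size_t i = chunk_bytes; i < count; i++) {
        dest[i] = src[i];                           // Remaining characters.
      }
    }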
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // rsp[0]: return address
+ // rsp[8]: to
+ // rsp[16]: from
+ // rsp[24]: string
+
+ const int kToOffset = 1 * kPointerSize;
+ const int kFromOffset = kToOffset + kPointerSize;
+ const int kStringOffset = kFromOffset + kPointerSize;
+ const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
+
+ // Make sure first argument is a string.
+ __ movq(rax, Operand(rsp, kStringOffset));
+ ASSERT_EQ(0, kSmiTag);
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
+ __ j(NegateCondition(is_string), &runtime);
+
+ // rax: string
+ // rbx: instance type
+ // Calculate length of sub string using the smi values.
+ __ movq(rcx, Operand(rsp, kToOffset));
+ __ movq(rdx, Operand(rsp, kFromOffset));
+ __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
+
+ __ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen.
+ __ j(negative, &runtime);
+ // Handle sub-strings of length 2 and less in the runtime system.
+ __ SmiToInteger32(rcx, rcx);
+ __ cmpl(rcx, Immediate(2));
+ __ j(below_equal, &runtime);
+
+ // rax: string
+ // rbx: instance type
+ // rcx: result string length
+ // Check for flat ascii string
+ Label non_ascii_flat;
+ __ and_(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ __ cmpb(rbx, Immediate(kSeqStringTag | kAsciiStringTag));
+ __ j(not_equal, &non_ascii_flat);
+
+ // Allocate the result.
+ __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
+
+ // rax: result string
+ // rcx: result string length
+ __ movq(rdx, rsi); // rsi used by following code.
+ // Locate first character of result.
+ __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ // Load string argument and locate character of sub string start.
+ __ movq(rsi, Operand(rsp, kStringOffset));
+ __ movq(rbx, Operand(rsp, kFromOffset));
+ {
+ SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
+ __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ }
+
+ // rax: result string
+ // rcx: result length
+ // rdx: original value of rsi
+ // rdi: first character of result
+ // rsi: character of sub string start
+ GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
+ __ movq(rsi, rdx); // Restore rsi.
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(kArgumentsSize);
+
+ __ bind(&non_ascii_flat);
+ // rax: string
+ // rbx: instance type & kStringRepresentationMask | kStringEncodingMask
+ // rcx: result string length
+ // Check for sequential two byte string
+ __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
+ __ j(not_equal, &runtime);
+
+ // Allocate the result.
+ __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
+
+ // rax: result string
+ // rcx: result string length
+ __ movq(rdx, rsi); // rsi used by following code.
+ // Locate first character of result.
+ __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
+ // Load string argument and locate character of sub string start.
+ __ movq(rsi, Operand(rsp, kStringOffset));
+ __ movq(rbx, Operand(rsp, kFromOffset));
+ {
+ SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
+ __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ }
+
+ // rax: result string
+ // rcx: result length
+ // rdx: original value of rsi
+ // rdi: first character of result
+ // rsi: character of sub string start
+ GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
+ __ movq(rsi, rdx); // Restore rsi.
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(kArgumentsSize);
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
+}
+
+
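The guards at the top of SubStringStub reduce to a small predicate; everything it rejects is handled by Runtime::kSubString instead. A hedged sketch (the exact division of labor with the runtime is an assumption here):

    // Fast path only for non-negative smi indices whose result is longer
    // than two characters; very short results go to the runtime.
    inline bool SubStringFastPathApplies(int from, int to) {
      return from >= 0 && to >= 0 && (to - from) > 2;
    }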
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ // Ensure that you can always subtract a string length from a non-negative
+ // number (e.g. another length).
+ ASSERT(String::kMaxLength < 0x7fffffff);
+
+ // Find minimum length and length difference.
+ __ movl(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ movl(scratch4, scratch1);
+ __ subl(scratch4, FieldOperand(right, String::kLengthOffset));
+ // Register scratch4 now holds left.length - right.length.
+ const Register length_difference = scratch4;
+ Label left_shorter;
+ __ j(less, &left_shorter);
+ // The right string isn't longer than the left one.
+ // Get the right string's length by subtracting the (non-negative) difference
+ // from the left string's length.
+ __ subl(scratch1, length_difference);
+ __ bind(&left_shorter);
+ // Register scratch1 now holds Min(left.length, right.length).
+ const Register min_length = scratch1;
+
+ Label compare_lengths;
+ // If min-length is zero, go directly to comparing lengths.
+ __ testl(min_length, min_length);
+ __ j(zero, &compare_lengths);
+
+ // Registers scratch2 and scratch3 are free.
+ Label result_not_equal;
+ Label loop;
+ {
+ // Check characters 0 .. min_length - 1 in a loop.
+ // Use scratch3 as loop index, min_length as limit and scratch2
+ // for computation.
+ const Register index = scratch3;
+ __ movl(index, Immediate(0)); // Index into strings.
+ __ bind(&loop);
+ // Compare characters.
+ // TODO(lrn): Could we load more than one character at a time?
+ __ movb(scratch2, FieldOperand(left,
+ index,
+ times_1,
+ SeqAsciiString::kHeaderSize));
+ // Increment index and use a -1 displacement on the next load to give
+ // the previous load extra time to complete.
+ __ addl(index, Immediate(1));
+ __ cmpb(scratch2, FieldOperand(right,
+ index,
+ times_1,
+ SeqAsciiString::kHeaderSize - 1));
+ __ j(not_equal, &result_not_equal);
+ __ cmpl(index, min_length);
+ __ j(not_equal, &loop);
+ }
+ // Completed loop without finding different characters.
+ // Compare lengths (precomputed).
+ __ bind(&compare_lengths);
+ __ testl(length_difference, length_difference);
+ __ j(not_zero, &result_not_equal);
+
+ // Result is EQUAL.
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ ret(2 * kPointerSize);
+
+ Label result_greater;
+ __ bind(&result_not_equal);
+ // Unequal comparison of left to right, either character or length.
+ __ j(greater, &result_greater);
+
+ // Result is LESS.
+ __ Move(rax, Smi::FromInt(LESS));
+ __ ret(2 * kPointerSize);
+
+ // Result is GREATER.
+ __ bind(&result_greater);
+ __ Move(rax, Smi::FromInt(GREATER));
+ __ ret(2 * kPointerSize);
+}
+
+
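The routine above is the assembly form of an ordinary lexicographic compare: walk the common prefix, and only if it is identical decide by the precomputed length difference. In plain C++ (an illustrative restatement, with -1/0/1 for LESS/EQUAL/GREATER):

    #include <algorithm>

    int CompareFlatAscii(const unsigned char* left, int left_length,
                         const unsigned char* right, int right_length) {
      int min_length = std::min(left_length, right_length);
      for (int i = 0; i < min_length; i++) {
        if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
      }
      int diff = left_length - right_length;
      return diff == 0 ? 0 : (diff < 0 ? -1 : 1);
    }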
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // rsp[0]: return address
+ // rsp[8]: right string
+ // rsp[16]: left string
+
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
+ __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
+
+ // Check for identity.
+ Label not_same;
+ __ cmpq(rdx, rax);
+ __ j(not_equal, &not_same);
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&not_same);
+
+ // Check that both are sequential ASCII strings.
+ __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
+
+ // Inline comparison of ascii strings.
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
+}
+
#undef __
#define __ masm.
@@ -8220,6 +9201,7 @@ ModuloFunction CreateModuloFunction() {
#endif
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index fa90f0248..a758e739b 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -32,6 +32,7 @@ namespace v8 {
namespace internal {
// Forward declarations
+class CompilationInfo;
class DeferredCode;
class RegisterAllocator;
class RegisterFile;
@@ -43,57 +44,70 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Reference support
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-
+// A reference is a C++ stack-allocated object that puts a
+// reference on the virtual frame. The reference may be consumed
+// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
+// When the lifetime (scope) of a valid reference ends, it must have
+// been consumed, and be in state UNLOADED.
class Reference BASE_EMBEDDED {
public:
// The values of the types are important, see size().
- enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen, Expression* expression);
+ enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+
+ Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get = false);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
- ASSERT(type_ == ILLEGAL);
+ ASSERT_EQ(ILLEGAL, type_);
type_ = value;
}
+ void set_unloaded() {
+ ASSERT_NE(ILLEGAL, type_);
+ ASSERT_NE(UNLOADED, type_);
+ type_ = UNLOADED;
+ }
// The size the reference takes up on the stack.
- int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+ int size() const {
+ return (type_ < SLOT) ? 0 : type_;
+ }
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+ bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
- // the expression stack, and it is left in place with its value above it.
+ // the expression stack, and it is consumed by the call unless the
+ // reference is for a compound assignment.
+ // If the reference is not consumed, it is left in place under its value.
void GetValue();
// Like GetValue except that the slot is expected to be written to before
- // being read from again. Thae value of the reference may be invalidated,
+ // being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
void TakeValue();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
- // on the expression stack. The stored value is left in place (with the
- // reference intact below it) to support chained assignments.
+ // on the expression stack. The value is stored in the location specified
+ // by the reference, and is left on top of the stack, after the reference
+ // is popped from beneath it (unloaded).
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
+ bool persist_after_get_;
};
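The lifecycle spelled out in the comment block above implies a usage pattern along these lines (an assumed sketch; cgen and expr stand for a live CodeGenerator and Expression):

    {
      Reference ref(cgen, expr);  // Puts the reference on the virtual frame.
      ref.GetValue();             // Consumes it, leaving its value on top.
    }                             // Scope ends with the reference UNLOADED.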
@@ -280,11 +294,21 @@ enum ArgumentsAllocationMode {
class CodeGenerator: public AstVisitor {
public:
+ // Compilation mode. Either the compiler is used as the primary
+ // compiler and needs to set up everything, or the compiler is used as
+ // the secondary compiler for split compilation and has to handle
+ // bailouts.
+ enum Mode {
+ PRIMARY,
+ SECONDARY
+ };
+
// Takes a function literal, generates code for it. This function should only
// be called by compiler.cc.
static Handle<Code> MakeCode(FunctionLiteral* fun,
Handle<Script> script,
- bool is_eval);
+ bool is_eval,
+ CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(FunctionLiteral* fun);
@@ -328,8 +352,7 @@ class CodeGenerator: public AstVisitor {
private:
// Construction/Destruction
- CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
- virtual ~CodeGenerator() { delete masm_; }
+ CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
// Accessors
Scope* scope() const { return scope_; }
@@ -367,7 +390,7 @@ class CodeGenerator: public AstVisitor {
void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
- void GenCode(FunctionLiteral* fun);
+ void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
// Generate the return sequence code. Should be called no more than
// once per compiled function, immediately after binding the return
@@ -422,6 +445,11 @@ class CodeGenerator: public AstVisitor {
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
+ // Load a property of an object, returning it in a Result.
+ // The object and the property name are passed on the stack and are
+ // not changed.
+ Result EmitKeyedLoad(bool is_global);
+
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for
@@ -446,20 +474,20 @@ class CodeGenerator: public AstVisitor {
// Emit code to perform a binary operation on a constant
// smi and a likely smi. Consumes the Result *operand.
- void ConstantSmiBinaryOperation(Token::Value op,
- Result* operand,
- Handle<Object> constant_operand,
- StaticType* type,
- bool reversed,
- OverwriteMode overwrite_mode);
+ Result ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> constant_operand,
+ StaticType* type,
+ bool reversed,
+ OverwriteMode overwrite_mode);
// Emit code to perform a binary operation on two likely smis.
// The code to handle smi arguments is produced inline.
// Consumes the Results *left and *right.
- void LikelySmiBinaryOperation(Token::Value op,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode);
+ Result LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode);
void Comparison(Condition cc,
bool strict,
@@ -474,12 +502,14 @@ class CodeGenerator: public AstVisitor {
// at most 16 bits of user-controlled data per assembly operation.
void LoadUnsafeSmi(Register target, Handle<Object> value);
- void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+ void CallWithArguments(ZoneList<Expression*>* arguments,
+ CallFunctionFlags flags,
+ int position);
- // Use an optimized version of Function.prototype.apply that avoid
- // allocating the arguments object and just copies the arguments
- // from the stack.
- void CallApplyLazy(Property* apply,
+ // An optimized implementation of expressions of the form
+ // x.apply(y, arguments). We call x the applicand and y the receiver.
+ // The optimization avoids allocating an arguments object if possible.
+ void CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position);
@@ -512,6 +542,7 @@ class CodeGenerator: public AstVisitor {
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
+ void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -609,7 +640,8 @@ class CodeGenerator: public AstVisitor {
friend class Reference;
friend class Result;
friend class FastCodeGenerator;
- friend class CodeGenSelector;
+ friend class FullCodeGenerator;
+ friend class FullCodeGenSyntaxChecker;
friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
@@ -617,46 +649,6 @@ class CodeGenerator: public AstVisitor {
};
-// -------------------------------------------------------------------------
-// Code stubs
-//
-// These independent code objects are created once, and used multiple
-// times by generated code to perform common tasks, often the slow
-// case of a JavaScript operation. They are all subclasses of CodeStub,
-// which is declared in code-stubs.h.
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop)
- : argc_(argc), in_loop_(in_loop) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
-
-#ifdef DEBUG
- void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
-#endif
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() { return argc_; }
- InLoopFlag InLoop() { return in_loop_; }
-};
-
-
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags {
NO_GENERIC_BINARY_FLAGS = 0,
@@ -686,6 +678,11 @@ class GenericBinaryOpStub: public CodeStub {
void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
+ Result GenerateCall(MacroAssembler* masm,
+ VirtualFrame* frame,
+ Result* left,
+ Result* right);
+
private:
Token::Value op_;
OverwriteMode mode_;
@@ -734,9 +731,8 @@ class GenericBinaryOpStub: public CodeStub {
void GenerateReturn(MacroAssembler* masm);
bool ArgsInRegistersSupported() {
- return ((op_ == Token::ADD) || (op_ == Token::SUB)
- || (op_ == Token::MUL) || (op_ == Token::DIV))
- && flags_ != NO_SMI_CODE_IN_STUB;
+ return (op_ == Token::ADD) || (op_ == Token::SUB)
+ || (op_ == Token::MUL) || (op_ == Token::DIV);
}
bool IsOperationCommutative() {
return (op_ == Token::ADD) || (op_ == Token::MUL);
@@ -745,8 +741,31 @@ class GenericBinaryOpStub: public CodeStub {
void SetArgsInRegisters() { args_in_registers_ = true; }
void SetArgsReversed() { args_reversed_ = true; }
bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
- bool HasArgumentsInRegisters() { return args_in_registers_; }
- bool HasArgumentsReversed() { return args_reversed_; }
+ bool HasArgsInRegisters() { return args_in_registers_; }
+ bool HasArgsReversed() { return args_reversed_; }
+};
+
+
+class StringStubBase: public CodeStub {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersREP adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii);
+
+ // Generate code for copying characters using the rep movs instruction.
+ // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
+ // not supported.
+ void GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest, // Must be rdi.
+ Register src, // Must be rsi.
+ Register count, // Must be rcx.
+ bool ascii);
};
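
The tradeoff those comments describe (a simple loop for tiny counts, rep movs for everything else) can be sketched in ordinary C++. This is illustrative only: the names and the threshold are hypothetical, and memcpy stands in for the rep movs block copy; the non-overlap restriction carries over.

#include <cstddef>
#include <cstring>

// Simple-loop copy: no setup cost, best for very short strings.
static void CopyCharsLoop(char* dest, const char* src, std::size_t count) {
  for (std::size_t i = 0; i < count; i++) dest[i] = src[i];
}

// Block copy: fixed setup cost, higher throughput on long strings.
// memcpy stands in for "rep movs"; both require non-overlapping regions.
static void CopyCharsBlock(char* dest, const char* src, std::size_t count) {
  std::memcpy(dest, src, count);
}

static void CopyChars(char* dest, const char* src, std::size_t count) {
  const std::size_t kRepThreshold = 16;  // Illustrative cutoff, not V8's.
  if (count < kRepThreshold) {
    CopyCharsLoop(dest, src, count);
  } else {
    CopyCharsBlock(dest, src, count);
  }
}

int main() {
  char buffer[8] = {0};
  CopyChars(buffer, "abc", 3);  // Small count: takes the loop path.
  return 0;
}
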
@@ -757,7 +776,7 @@ enum StringAddFlags {
};
-class StringAddStub: public CodeStub {
+class StringAddStub: public StringStubBase {
public:
explicit StringAddStub(StringAddFlags flags) {
string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
@@ -769,17 +788,45 @@ class StringAddStub: public CodeStub {
void Generate(MacroAssembler* masm);
- void GenerateCopyCharacters(MacroAssembler* masm,
- Register desc,
- Register src,
- Register count,
- bool ascii);
-
// Should the stub check whether arguments are strings?
bool string_check_;
};
+class SubStringStub: public StringStubBase {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public CodeStub {
+ public:
+ explicit StringCompareStub() {}
+
+ // Compares two flat ASCII strings and returns the result in rax after
+ // popping two arguments from the stack.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ private:
+ Major MajorKey() { return StringCompare; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index bc88d4668..261b16c01 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -68,7 +68,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ xor_(rax, rax); // No arguments (argc == 0).
__ movq(rbx, ExternalReference::debug_break());
- CEntryDebugBreakStub ceb;
+ CEntryStub ceb(1, ExitFrame::MODE_DEBUG);
__ CallStub(&ceb);
// Restore the register values containing object pointers from the expression
@@ -158,12 +158,13 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // REgister state for IC store call (from ic-x64.cc).
+ // Register state for IC store call (from ic-x64.cc).
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
+ // -- rdx : receiver
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit(), false);
+ Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit() | rdx.bit(), false);
}
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 0b43e766e..ce3aae8a2 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -114,6 +114,10 @@ static ByteMnemonic zero_operands_instr[] = {
{ 0x9E, UNSET_OP_ORDER, "sahf" },
{ 0x99, UNSET_OP_ORDER, "cdq" },
{ 0x9B, UNSET_OP_ORDER, "fwait" },
+ { 0xA4, UNSET_OP_ORDER, "movs" },
+ { 0xA5, UNSET_OP_ORDER, "movs" },
+ { 0xA6, UNSET_OP_ORDER, "cmps" },
+ { 0xA7, UNSET_OP_ORDER, "cmps" },
{ -1, UNSET_OP_ORDER, "" }
};
@@ -157,6 +161,16 @@ enum InstructionType {
};
+enum Prefixes {
+ ESCAPE_PREFIX = 0x0F,
+ OPERAND_SIZE_OVERRIDE_PREFIX = 0x66,
+ ADDRESS_SIZE_OVERRIDE_PREFIX = 0x67,
+ REPNE_PREFIX = 0xF2,
+ REP_PREFIX = 0xF3,
+ REPEQ_PREFIX = REP_PREFIX
+};
+
+
struct InstructionDesc {
const char* mnem;
InstructionType type;
@@ -1128,12 +1142,12 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
// Scan for prefixes.
while (true) {
current = *data;
- if (current == 0x66) { // Group 3 prefix.
+ if (current == OPERAND_SIZE_OVERRIDE_PREFIX) { // Group 3 prefix.
operand_size_ = current;
} else if ((current & 0xF0) == 0x40) { // REX prefix.
setRex(current);
if (rex_w()) AppendToBuffer("REX.W ");
- } else if ((current & 0xFE) == 0xF2) { // Group 1 prefix.
+ } else if ((current & 0xFE) == 0xF2) { // Group 1 prefix (0xF2 or 0xF3).
group_1_prefix_ = current;
} else { // Not a prefix - an opcode.
break;
@@ -1145,7 +1159,17 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
byte_size_operand_ = idesc.byte_size_operation;
switch (idesc.type) {
case ZERO_OPERANDS_INSTR:
- AppendToBuffer(idesc.mnem);
+ if (current >= 0xA4 && current <= 0xA7) {
+ // String move or compare operations.
+ if (group_1_prefix_ == REP_PREFIX) {
+ // REP.
+ AppendToBuffer("rep ");
+ }
+ if (rex_w()) AppendToBuffer("REX.W ");
+ AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
+ } else {
+ AppendToBuffer("%s", idesc.mnem, operand_size_code());
+ }
data++;
break;
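
As a worked example of this decode path, consider the byte sequence F3 48 A5. The following stand-alone sketch (not the disassembler itself, and simplified to just the cases shown above) scans past the REP and REX prefixes, lands on opcode 0xA5, and prints "rep REX.W movsq".

#include <cstdint>
#include <cstdio>

int main() {
  const uint8_t code[] = {0xF3, 0x48, 0xA5};  // rep; REX.W; movs.
  const int n = sizeof(code);
  uint8_t group_1_prefix = 0, operand_size = 0, rex = 0;
  int i = 0;
  for (; i < n; i++) {
    uint8_t b = code[i];
    if (b == 0x66) {                  // Operand size override prefix.
      operand_size = b;
    } else if ((b & 0xF0) == 0x40) {  // REX prefix (0x40..0x4F).
      rex = b;
    } else if ((b & 0xFE) == 0xF2) {  // Group 1: F2 (repne) or F3 (rep).
      group_1_prefix = b;
    } else {
      break;                          // First non-prefix byte is the opcode.
    }
  }
  uint8_t opcode = code[i];                  // 0xA5 for this sequence.
  if (opcode >= 0xA4 && opcode <= 0xA7) {    // String move/compare ops.
    if (group_1_prefix == 0xF3) std::printf("rep ");
    if (rex & 0x08) std::printf("REX.W ");   // The W bit selects 64-bit.
    char size = (rex & 0x08) ? 'q'           // Rough operand_size_code().
              : operand_size ? 'w'
              : (opcode & 1) ? 'l' : 'b';
    std::printf("%s%c\n", (opcode <= 0xA5) ? "movs" : "cmps", size);
  }
  return 0;
}
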
diff --git a/deps/v8/src/x64/fast-codegen-x64.cc b/deps/v8/src/x64/fast-codegen-x64.cc
index 3ef867802..12b5653e5 100644
--- a/deps/v8/src/x64/fast-codegen-x64.cc
+++ b/deps/v8/src/x64/fast-codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,1699 +28,109 @@
#include "v8.h"
#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
#include "fast-codegen.h"
-#include "parser.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm())
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right, with the
-// return address on top of them. The actual argument count matches the
-// formal parameter count expected by the function.
-//
-// The live registers are:
-// o rdi: the JS function object being called (ie, ourselves)
-// o rsi: our context
-// o rbp: our caller's frame pointer
-// o rsp: stack pointer (pointing to return address)
-//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-x64.h for its layout.
-void FastCodeGenerator::Generate(FunctionLiteral* fun) {
- function_ = fun;
- SetFunctionPosition(fun);
-
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
-
- { Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = fun->scope()->num_stack_slots();
- if (locals_count <= 1) {
- if (locals_count > 0) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- }
- } else {
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < locals_count; i++) {
- __ push(rdx);
- }
- }
- }
-
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- if (fun->scope()->num_heap_slots() > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is still in rdi.
- __ push(rdi);
- __ CallRuntime(Runtime::kNewContext, 1);
- function_in_register = false;
- // Context is returned in both rax and rsi. It replaces the context
- // passed to us. It's saved in the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
-
- // Copy any necessary parameters into the context.
- int num_parameters = fun->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = fun->scope()->parameter(i)->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
- // Store it in the context
- __ movq(Operand(rsi, Context::SlotOffset(slot->index())), rax);
- }
- }
- }
-
- // Possibly allocate an arguments object.
- Variable* arguments = fun->scope()->arguments()->AsVariable();
- if (arguments != NULL) {
- // Arguments object must be allocated after the context object, in
- // case the "arguments" or ".arguments" variables are in the context.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (function_in_register) {
- __ push(rdi);
- } else {
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- }
- // The receiver is just before the parameters on the caller's stack.
- __ lea(rdx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
- fun->num_parameters() * kPointerSize));
- __ push(rdx);
- __ Push(Smi::FromInt(fun->num_parameters()));
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ CallStub(&stub);
- // Store new arguments object in both "arguments" and ".arguments" slots.
- __ movq(rcx, rax);
- Move(arguments->slot(), rax, rbx, rdx);
- Slot* dot_arguments_slot =
- fun->scope()->arguments_shadow()->AsVariable()->slot();
- Move(dot_arguments_slot, rcx, rbx, rdx);
- }
-
- { Comment cmnt(masm_, "[ Stack check");
- Label ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
-
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(fun->scope()->declarations());
- }
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(fun->body());
- ASSERT(loop_depth() == 0);
- }
-
- { Comment cmnt(masm_, "[ return <undefined>;");
- // Emit a 'return undefined' in case control fell off the end of the body.
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- EmitReturnSequence(function_->end_position());
- }
-}
-
-
-void FastCodeGenerator::EmitReturnSequence(int position) {
- Comment cmnt(masm_, "[ Return sequence");
- if (return_label_.is_bound()) {
- __ jmp(&return_label_);
- } else {
- __ bind(&return_label_);
- if (FLAG_trace) {
- __ push(rax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
- CodeGenerator::RecordPositions(masm_, position);
- __ RecordJSReturn();
- // Do not use the leave instruction here because it is too short to
- // patch with the code required by the debugger.
- __ movq(rsp, rbp);
- __ pop(rbp);
- __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Add padding that will be overwritten by a debugger breakpoint. We
- // have just generated "movq rsp, rbp; pop rbp; ret k" with length 7
- // (3 + 1 + 3).
- const int kPadding = Assembler::kJSReturnSequenceLength - 7;
- for (int i = 0; i < kPadding; ++i) {
- masm_->int3();
- }
- // Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(Assembler::kJSReturnSequenceLength,
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
- }
-}
-
-
-void FastCodeGenerator::Apply(Expression::Context context,
- Slot* slot,
- Register scratch) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- break;
- case Expression::kValue: {
- MemOperand location = EmitSlotSearch(slot, scratch);
- __ push(location);
- break;
- }
- case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
- Move(scratch, slot);
- Apply(context, scratch);
- break;
- }
-}
-
-
-void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- break;
- case Expression::kValue:
- __ Push(lit->handle());
- break;
- case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
- __ Move(rax, lit->handle());
- Apply(context, rax);
- break;
- }
-}
-
-
-void FastCodeGenerator::ApplyTOS(Expression::Context context) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- __ Drop(1);
- break;
- case Expression::kValue:
- break;
- case Expression::kTest:
- __ pop(rax);
- TestAndBranch(rax, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ movq(rax, Operand(rsp, 0));
- TestAndBranch(rax, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ movq(rax, Operand(rsp, 0));
- TestAndBranch(rax, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- }
- }
-}
-
-
-void FastCodeGenerator::DropAndApply(int count,
- Expression::Context context,
- Register reg) {
- ASSERT(count > 0);
- ASSERT(!reg.is(rsp));
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- __ Drop(count);
- break;
- case Expression::kValue:
- if (count > 1) __ Drop(count - 1);
- __ movq(Operand(rsp, 0), reg);
- break;
- case Expression::kTest:
- __ Drop(count);
- TestAndBranch(reg, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (count > 1) __ Drop(count - 1);
- __ movq(Operand(rsp, 0), reg);
- TestAndBranch(reg, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (count > 1) __ Drop(count - 1);
- __ movq(Operand(rsp, 0), reg);
- TestAndBranch(reg, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
- }
-}
-
-
-MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- return Operand(rbp, SlotOffset(slot));
- case Slot::CONTEXT: {
- int context_chain_length =
- function_->scope()->ContextChainLength(slot->var()->scope());
- __ LoadContext(scratch, context_chain_length);
- return CodeGenerator::ContextOperand(scratch, slot->index());
- }
- case Slot::LOOKUP:
- UNREACHABLE();
- }
- UNREACHABLE();
- return Operand(rax, 0);
-}
-
-
-void FastCodeGenerator::Move(Register destination, Slot* source) {
- MemOperand location = EmitSlotSearch(source, destination);
- __ movq(destination, location);
-}
-
-
-void FastCodeGenerator::Move(Slot* dst,
- Register src,
- Register scratch1,
- Register scratch2) {
- ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
- ASSERT(!scratch1.is(src) && !scratch2.is(src));
- MemOperand location = EmitSlotSearch(dst, scratch1);
- __ movq(location, src);
- // Emit the write barrier code if the location is in the heap.
- if (dst->type() == Slot::CONTEXT) {
- int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
- __ RecordWrite(scratch1, offset, src, scratch2);
- }
-}
-
-
-void FastCodeGenerator::TestAndBranch(Register source,
- Label* true_label,
- Label* false_label) {
- ASSERT_NE(NULL, true_label);
- ASSERT_NE(NULL, false_label);
- // Use the shared ToBoolean stub to compile the value in the register into
- // control flow to the code generator's true and false labels. Perform
- // the fast checks assumed by the stub.
-
- // The undefined value is false.
- __ CompareRoot(source, Heap::kUndefinedValueRootIndex);
- __ j(equal, false_label);
- __ CompareRoot(source, Heap::kTrueValueRootIndex); // True is true.
- __ j(equal, true_label);
- __ CompareRoot(source, Heap::kFalseValueRootIndex); // False is false.
- __ j(equal, false_label);
- ASSERT_EQ(0, kSmiTag);
- __ SmiCompare(source, Smi::FromInt(0)); // The smi zero is false.
- __ j(equal, false_label);
- Condition is_smi = masm_->CheckSmi(source); // All other smis are true.
- __ j(is_smi, true_label);
-
- // Call the stub for all other cases.
- __ push(source);
- ToBooleanStub stub;
- __ CallStub(&stub);
- __ testq(rax, rax); // The stub returns nonzero for true.
- __ j(not_zero, true_label);
- __ jmp(false_label);
-}
-
-
-void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
- Comment cmnt(masm_, "[ Declaration");
- Variable* var = decl->proxy()->var();
- ASSERT(var != NULL); // Must have been resolved.
- Slot* slot = var->slot();
- Property* prop = var->AsProperty();
-
- if (slot != NULL) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- if (decl->mode() == Variable::CONST) {
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(Operand(rbp, SlotOffset(slot)), kScratchRegister);
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(Operand(rbp, SlotOffset(slot)));
- }
- break;
-
- case Slot::CONTEXT:
- // We bypass the general EmitSlotSearch because we know more about
- // this specific context.
-
- // The variable in the decl always resides in the current context.
- ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
- if (FLAG_debug_code) {
- // Check if we have the correct context pointer.
- __ movq(rbx,
- CodeGenerator::ContextOperand(rsi, Context::FCONTEXT_INDEX));
- __ cmpq(rbx, rsi);
- __ Check(equal, "Unexpected declaration in current context.");
- }
- if (decl->mode() == Variable::CONST) {
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(CodeGenerator::ContextOperand(rsi, slot->index()),
- kScratchRegister);
- // No write barrier since the hole value is in old space.
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(rax);
- __ movq(CodeGenerator::ContextOperand(rsi, slot->index()), rax);
- int offset = Context::SlotOffset(slot->index());
- __ RecordWrite(rsi, offset, rax, rcx);
- }
- break;
-
- case Slot::LOOKUP: {
- __ push(rsi);
- __ Push(var->name());
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(decl->mode() == Variable::VAR ||
- decl->mode() == Variable::CONST);
- PropertyAttributes attr =
- (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
- __ Push(Smi::FromInt(attr));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (decl->mode() == Variable::CONST) {
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- } else {
- __ Push(Smi::FromInt(0)); // no initial value!
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-
- } else if (prop != NULL) {
- if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
- // We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value.
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->key()->context());
- Visit(prop->key());
-
- if (decl->fun() != NULL) {
- ASSERT_EQ(Expression::kValue, decl->fun()->context());
- Visit(decl->fun());
- __ pop(rax);
- } else {
- __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
- }
-
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
-
- // Absence of a test rax instruction following the call
- // indicates that none of the load was inlined.
-
- // Value in rax is ignored (declarations are statements). Receiver
- // and key on stack are discarded.
- __ Drop(2);
- }
- }
-}
-
-
-void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals.
- __ push(rsi); // The context is the first argument.
- __ Push(pairs);
- __ Push(Smi::FromInt(is_eval_ ? 1 : 0));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
- // Return value is ignored.
-}
-
-
-void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- Comment cmnt(masm_, "[ FunctionLiteral");
-
- // Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate =
- Compiler::BuildBoilerplate(expr, script_, this);
- if (HasStackOverflow()) return;
-
- ASSERT(boilerplate->IsBoilerplate());
-
- // Create a new closure.
- __ push(rsi);
- __ Push(boilerplate);
- __ CallRuntime(Runtime::kNewClosure, 2);
- Apply(expr->context(), rax);
-}
-
-
-void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var(), expr->context());
-}
-
-
-void FastCodeGenerator::EmitVariableLoad(Variable* var,
- Expression::Context context) {
- Expression* rewrite = var->rewrite();
- if (rewrite == NULL) {
- ASSERT(var->is_global());
- Comment cmnt(masm_, "Global variable");
- // Use inline caching. Variable name is passed in rcx and the global
- // object on the stack.
- __ push(CodeGenerator::GlobalObject());
- __ Move(rcx, var->name());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- // A test rax instruction following the call is used by the IC to
- // indicate that the inobject property case was inlined. Ensure there
- // is no test rax instruction here.
- __ nop();
-
- DropAndApply(1, context, rax);
- } else if (rewrite->AsSlot() != NULL) {
- Slot* slot = rewrite->AsSlot();
- if (FLAG_debug_code) {
- switch (slot->type()) {
- case Slot::LOCAL:
- case Slot::PARAMETER: {
- Comment cmnt(masm_, "Stack slot");
- break;
- }
- case Slot::CONTEXT: {
- Comment cmnt(masm_, "Context slot");
- break;
- }
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- break;
- }
- }
- Apply(context, slot, rax);
- } else {
- Comment cmnt(masm_, "Variable rewritten to property");
- // A variable has been rewritten into an explicit access to an object
- // property.
- Property* property = rewrite->AsProperty();
- ASSERT_NOT_NULL(property);
-
- // The only property expressions that can occur are of the form
- // "slot[literal]".
-
- // Assert that the object is in a slot.
- Variable* object = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object);
- Slot* object_slot = object->slot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- MemOperand object_loc = EmitSlotSearch(object_slot, rax);
- __ push(object_loc);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ Push(key_literal->handle());
-
- // Do a keyed property load.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // Notice: We must not have a "test rax, ..." instruction after the
- // call. It is treated specially by the LoadIC code.
-
- // Drop key and object left on the stack by IC, and push the result.
- DropAndApply(2, context, rax);
- }
-}
-
-
-void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- Label done;
- // Registers will be used as follows:
- // rdi = JS function.
- // rbx = literals array.
- // rax = regexp literal.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rbx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ movq(rax, FieldOperand(rbx, literal_offset));
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &done);
- // Create regexp literal using runtime function
- // Result will be in rax.
- __ push(rbx);
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->pattern());
- __ Push(expr->flags());
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- // Label done:
- __ bind(&done);
- Apply(expr->context(), rax);
-}
-
-
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- Comment cmnt(masm_, "[ ObjectLiteral");
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->constant_properties());
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
- } else {
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
- }
-
- // If result_saved == true: The result is saved on top of the
- // stack and in rax.
- // If result_saved == false: The result not on the stack, just in rax.
- bool result_saved = false;
-
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key();
- Expression* value = property->value();
- if (!result_saved) {
- __ push(rax); // Save result on the stack
- result_saved = true;
- }
- switch (property->kind()) {
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
- __ pop(rax);
- __ Move(rcx, key->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // StoreIC leaves the receiver on the stack.
- __ movq(rax, Operand(rsp, 0)); // Restore result back into rax.
- break;
- }
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
- __ push(rax);
- Visit(key);
- ASSERT_EQ(Expression::kValue, key->context());
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
- __ CallRuntime(Runtime::kSetProperty, 3);
- __ movq(rax, Operand(rsp, 0)); // Restore result into rax.
- break;
- case ObjectLiteral::Property::SETTER:
- case ObjectLiteral::Property::GETTER:
- __ push(rax);
- Visit(key);
- ASSERT_EQ(Expression::kValue, key->context());
- __ Push(property->kind() == ObjectLiteral::Property::SETTER ?
- Smi::FromInt(1) :
- Smi::FromInt(0));
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
- __ CallRuntime(Runtime::kDefineAccessor, 4);
- __ movq(rax, Operand(rsp, 0)); // Restore result into rax.
- break;
- default: UNREACHABLE();
- }
- }
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- if (result_saved) __ Drop(1);
- break;
- case Expression::kValue:
- if (!result_saved) __ push(rax);
- break;
- case Expression::kTest:
- if (result_saved) __ pop(rax);
- TestAndBranch(rax, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (!result_saved) __ push(rax);
- TestAndBranch(rax, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (!result_saved) __ push(rax);
- TestAndBranch(rax, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
- }
+void FastCodeGenerator::EmitLoadReceiver(Register reg) {
+ // Offset 2 is due to return address and saved frame pointer.
+ int index = 2 + function()->scope()->num_parameters();
+ __ movq(reg, Operand(rbp, index * kPointerSize));
}
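
The arithmetic is worth seeing with numbers. A small sketch, assuming the standard x64 JS frame layout (receiver and arguments pushed left to right beneath the return address and saved rbp) and kPointerSize == 8:

#include <cstdio>

int main() {
  const int kPointerSize = 8;      // x64 pointer width.
  int num_parameters = 2;          // Hypothetical function f(a, b).
  int index = 2 + num_parameters;  // Skip saved rbp and return address.
  // Relative to rbp: [rbp] saved rbp, [rbp+8] return address,
  // [rbp+16] b, [rbp+24] a, [rbp+32] receiver.
  std::printf("receiver at rbp+%d\n", index * kPointerSize);  // rbp+32.
  return 0;
}
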
-void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->constant_elements());
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- }
-
- bool result_saved = false; // Is the result saved to the stack?
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- ZoneList<Expression*>* subexprs = expr->values();
- for (int i = 0, len = subexprs->length(); i < len; i++) {
- Expression* subexpr = subexprs->at(i);
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
-
- if (!result_saved) {
- __ push(rax);
- result_saved = true;
- }
- Visit(subexpr);
- ASSERT_EQ(Expression::kValue, subexpr->context());
-
- // Store the subexpression value in the array's elements.
- __ pop(rax); // Subexpression value.
- __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ movq(FieldOperand(rbx, offset), rax);
-
- // Update the write barrier for the array store.
- __ RecordWrite(rbx, offset, rax, rcx);
- }
-
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- if (result_saved) __ Drop(1);
- break;
- case Expression::kValue:
- if (!result_saved) __ push(rax);
- break;
- case Expression::kTest:
- if (result_saved) __ pop(rax);
- TestAndBranch(rax, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (!result_saved) __ push(rax);
- TestAndBranch(rax, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (!result_saved) __ push(rax);
- TestAndBranch(rax, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
+void FastCodeGenerator::EmitReceiverMapCheck() {
+ Comment cmnt(masm(), ";; MapCheck(this)");
+ if (FLAG_print_ir) {
+ PrintF("MapCheck(this)\n");
}
-}
-
-
-void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
- Expression::Context context) {
- SetSourcePosition(prop->position());
- Literal* key = prop->key()->AsLiteral();
- __ Move(rcx, key->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- Apply(context, rax);
-}
-
-
-void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop,
- Expression::Context context) {
- SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- Apply(context, rax);
-}
+ EmitLoadReceiver(rdx);
+ __ JumpIfSmi(rdx, bailout());
-void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
- Expression::Context context) {
- GenericBinaryOpStub stub(op,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS);
- __ CallStub(&stub);
- Apply(context, rax);
+ ASSERT(has_receiver() && receiver()->IsHeapObject());
+ Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
+ Handle<Map> map(object->map());
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), map);
+ __ j(not_equal, bailout());
}
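
The guard this emits can be modeled in a few lines of ordinary C++. A sketch with stand-in types, not V8's: smis are rejected first (they carry a zero low tag bit, consistent with the kSmiTag == 0 assertion elsewhere in this file, and have no map), then the receiver's map is compared against the map recorded at compile time.

#include <cstdint>

struct Map {};                                   // Hidden-class stand-in.
struct HeapObject { const Map* map; };

// Smis carry a zero low tag bit and have no map to check.
static bool IsSmi(uintptr_t tagged) { return (tagged & 1) == 0; }

// True means the fast path may proceed; false models "jump to bailout()".
static bool ReceiverMapCheck(uintptr_t receiver, const Map* expected_map) {
  if (IsSmi(receiver)) return false;             // JumpIfSmi(rdx, bailout()).
  const HeapObject* object =
      reinterpret_cast<const HeapObject*>(receiver - 1);  // Strip the tag.
  return object->map == expected_map;            // j(not_equal, bailout()).
}

int main() {
  Map map;
  HeapObject obj = {&map};
  uintptr_t tagged = reinterpret_cast<uintptr_t>(&obj) + 1;  // Heap tag.
  return ReceiverMapCheck(tagged, &map) ? 0 : 1;             // Returns 0.
}
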
-void FastCodeGenerator::EmitVariableAssignment(Variable* var,
- Expression::Context context) {
- ASSERT(var != NULL);
- ASSERT(var->is_global() || var->slot() != NULL);
- if (var->is_global()) {
- // Assignment to a global variable. Use inline caching for the
- // assignment. Right-hand-side value is passed in rax, variable name in
- // rcx, and the global object on the stack.
- __ pop(rax);
- __ Move(rcx, var->name());
- __ push(CodeGenerator::GlobalObject());
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // Overwrite the global object on the stack with the result if needed.
- DropAndApply(1, context, rax);
-
- } else if (var->slot() != NULL) {
- Slot* slot = var->slot();
- switch (slot->type()) {
- case Slot::LOCAL:
- case Slot::PARAMETER: {
- Operand target = Operand(rbp, SlotOffset(slot));
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Perform assignment and discard value.
- __ pop(target);
- break;
- case Expression::kValue:
- // Perform assignment and preserve value.
- __ movq(rax, Operand(rsp, 0));
- __ movq(target, rax);
- break;
- case Expression::kTest:
- // Perform assignment and test (and discard) value.
- __ pop(rax);
- __ movq(target, rax);
- TestAndBranch(rax, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ movq(rax, Operand(rsp, 0));
- __ movq(target, rax);
- TestAndBranch(rax, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ movq(rax, Operand(rsp, 0));
- __ movq(target, rax);
- TestAndBranch(rax, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
- }
- break;
- }
-
- case Slot::CONTEXT: {
- MemOperand target = EmitSlotSearch(slot, rcx);
- __ pop(rax);
- __ movq(target, rax);
-
- // RecordWrite may destroy all its register arguments.
- if (context == Expression::kValue) {
- __ push(rax);
- } else if (context != Expression::kEffect) {
- __ movq(rdx, rax);
- }
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ RecordWrite(rcx, offset, rax, rbx);
- if (context != Expression::kEffect && context != Expression::kValue) {
- Apply(context, rdx);
- }
- break;
- }
-
- case Slot::LOOKUP:
- UNREACHABLE();
- break;
- }
- } else {
- // Variables rewritten as properties are not treated as variables in
- // assignments.
- UNREACHABLE();
- }
-}
-
-
-void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a named store IC.
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
-
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(Operand(rsp, kPointerSize)); // Receiver is under value.
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- __ pop(rax);
- __ Move(rcx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(rax); // Result of assignment, saved even if not needed.
- __ push(Operand(rsp, kPointerSize)); // Receiver is under value.
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(rax);
- }
-
- DropAndApply(1, expr->context(), rax);
-}
-
-
-void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a keyed store IC.
-
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- // Reciever is under the key and value.
- __ push(Operand(rsp, 2 * kPointerSize));
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- __ pop(rax);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // This nop signals to the IC that there is no inlined code at the call
- // site for it to patch.
- __ nop();
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(rax); // Result of assignment, saved even if not needed.
- // Reciever is under the key and value.
- __ push(Operand(rsp, 2 * kPointerSize));
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(rax);
- }
-
- // Receiver and key are still on stack.
- DropAndApply(2, expr->context(), rax);
-}
-
-
-void FastCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- Expression* key = expr->key();
-
- // Record the source position for the property load.
- SetSourcePosition(expr->position());
-
- // Evaluate receiver.
- Visit(expr->obj());
-
- if (key->IsPropertyName()) {
- // Do a named property load. The IC expects the property name in rcx
- // and the receiver on the stack.
- __ Move(rcx, key->AsLiteral()->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // By emitting a nop we make sure that we do not have a "test rax,..."
- // instruction after the call it is treated specially by the LoadIC code.
- __ nop();
- DropAndApply(1, expr->context(), rax);
- } else {
- // Do a keyed property load.
- Visit(expr->key());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // Notice: We must not have a "test rax, ..." instruction after the
- // call. It is treated specially by the LoadIC code.
- __ nop();
- // Drop key and receiver left on the stack by IC.
- DropAndApply(2, expr->context(), rax);
- }
-}
-
-
-void FastCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> ignored,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
- NOT_IN_LOOP);
- __ call(ic, mode);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- DropAndApply(1, expr->context(), rax);
-}
-
-
-void FastCodeGenerator::EmitCallWithStub(Call* expr) {
- // Code common for calls using the call stub.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, NOT_IN_LOOP);
- __ CallStub(&stub);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- DropAndApply(1, expr->context(), rax);
-}
-
-
-void FastCodeGenerator::VisitCall(Call* expr) {
- Comment cmnt(masm_, "[ Call");
- Expression* fun = expr->expression();
- Variable* var = fun->AsVariableProxy()->AsVariable();
-
- if (var != NULL && var->is_possibly_eval()) {
- // Call to the identifier 'eval'.
- UNREACHABLE();
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // Call to a global variable.
- __ Push(var->name());
- // Push global object as receiver for the call IC lookup.
- __ push(CodeGenerator::GlobalObject());
- EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
- // Call to a lookup slot.
- UNREACHABLE();
- } else if (fun->AsProperty() != NULL) {
- // Call to an object property.
- Property* prop = fun->AsProperty();
- Literal* key = prop->key()->AsLiteral();
- if (key != NULL && key->handle()->IsSymbol()) {
- // Call to a named property, use call IC.
- __ Push(key->handle());
- Visit(prop->obj());
- EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
- } else {
- // Call to a keyed property, use keyed load IC followed by function
- // call.
- Visit(prop->obj());
- Visit(prop->key());
- // Record source code position for IC call.
- SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // By emitting a nop we make sure that we do not have a "test rax,..."
- // instruction after the call it is treated specially by the LoadIC code.
- __ nop();
- // Drop key left on the stack by IC.
- __ Drop(1);
- // Pop receiver.
- __ pop(rbx);
- // Push result (function).
- __ push(rax);
- // Push receiver object on stack.
- if (prop->is_synthetic()) {
- __ movq(rcx, CodeGenerator::GlobalObject());
- __ push(FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
- } else {
- __ push(rbx);
- }
- EmitCallWithStub(expr);
- }
- } else {
- // Call to some other expression. If the expression is an anonymous
- // function literal not called in a loop, mark it as one that should
- // also use the fast code generator.
- FunctionLiteral* lit = fun->AsFunctionLiteral();
- if (lit != NULL &&
- lit->name()->Equals(Heap::empty_string()) &&
- loop_depth() == 0) {
- lit->set_try_fast_codegen(true);
- }
- Visit(fun);
- // Load global receiver object.
- __ movq(rbx, CodeGenerator::GlobalObject());
- __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
- // Emit function call.
- EmitCallWithStub(expr);
- }
-}
-
-
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
- // Push function on the stack.
- Visit(expr->expression());
- ASSERT_EQ(Expression::kValue, expr->expression()->context());
- // If location is value, already on the stack,
-
- // Push global object (receiver).
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
+ // Compile global variable accesses as load IC calls. The only live
+ // registers are rsi (context) and possibly rdx (this). Both are also
+ // saved on the stack, and rsi is preserved by the call.
__ push(CodeGenerator::GlobalObject());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
- // If location is value, it is already on the stack,
- // so nothing to do here.
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetSourcePosition(expr->position());
-
- // Load function, arg_count into rdi and rax.
- __ Set(rax, arg_count);
- // Function is in rsp[arg_count + 1].
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, kPointerSize));
-
- Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
- __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-
- // Replace function on TOS with result in rax, or pop it.
- DropAndApply(1, expr->context(), rax);
-}
-
-
-void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Comment cmnt(masm_, "[ CallRuntime");
- ZoneList<Expression*>* args = expr->arguments();
-
- if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ Push(expr->name());
- __ movq(rax, CodeGenerator::GlobalObject());
- __ push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
- }
-
- if (expr->is_jsruntime()) {
- // Call the JS runtime function.
- Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
- NOT_IN_LOOP);
- __ call(ic, RelocInfo::CODE_TARGET);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- DropAndApply(1, expr->context(), rax);
+ __ Move(rcx, name);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ if (has_this_properties()) {
+ // Restore this.
+ EmitLoadReceiver(rdx);
} else {
- __ CallRuntime(expr->function(), arg_count);
- Apply(expr->context(), rax);
- }
-}
-
-
-void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- Visit(expr->expression());
- ASSERT_EQ(Expression::kEffect, expr->expression()->context());
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- break;
- case Expression::kValue:
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- break;
- case Expression::kTestValue:
- // Value is false so it's needed.
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- // Fall through.
- case Expression::kTest:
- case Expression::kValueTest:
- __ jmp(false_label_);
- break;
- }
- break;
- }
-
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- ASSERT_EQ(Expression::kTest, expr->expression()->context());
-
- Label push_true, push_false, done;
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
-
- case Expression::kValue:
- VisitForControl(expr->expression(), &push_false, &push_true);
- __ bind(&push_true);
- __ PushRoot(Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(&push_false);
- __ PushRoot(Heap::kFalseValueRootIndex);
- __ bind(&done);
- break;
-
- case Expression::kEffect:
- VisitForControl(expr->expression(), &done, &done);
- __ bind(&done);
- break;
-
- case Expression::kTest:
- VisitForControl(expr->expression(), false_label_, true_label_);
- break;
-
- case Expression::kValueTest:
- VisitForControl(expr->expression(), false_label_, &push_true);
- __ bind(&push_true);
- __ PushRoot(Heap::kTrueValueRootIndex);
- __ jmp(true_label_);
- break;
-
- case Expression::kTestValue:
- VisitForControl(expr->expression(), &push_false, true_label_);
- __ bind(&push_false);
- __ PushRoot(Heap::kFalseValueRootIndex);
- __ jmp(false_label_);
- break;
- }
- break;
- }
-
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- ASSERT_EQ(Expression::kValue, expr->expression()->context());
-
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy != NULL &&
- !proxy->var()->is_this() &&
- proxy->var()->is_global()) {
- Comment cmnt(masm_, "Global variable");
- __ push(CodeGenerator::GlobalObject());
- __ Move(rcx, proxy->name());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ movq(Operand(rsp, 0), rax);
- } else if (proxy != NULL &&
- proxy->var()->slot() != NULL &&
- proxy->var()->slot()->type() == Slot::LOOKUP) {
- __ push(rsi);
- __ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- __ push(rax);
- } else {
- // This expression cannot throw a reference error at the top level.
- Visit(expr->expression());
- }
-
- __ CallRuntime(Runtime::kTypeof, 1);
- Apply(expr->context(), rax);
- break;
- }
-
- default:
- UNREACHABLE();
+    __ nop();  // Not a test rax instruction: tells the IC that no code
+               // was inlined at this call site.
}
}
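
The convention behind that final nop can be sketched as follows. The 0xA9 byte is the x86 opcode of "test eax, imm32"; the helper and its use here are illustrative, not V8's actual patching code. The IC inspects the first opcode byte after its call site, and only a test instruction there marks inlined fast-path code to patch.

#include <cstdint>

// 0xA9 is the x86 opcode of "test eax, imm32"; a 0x90 nop (or any other
// byte) after the call site tells the IC that nothing was inlined.
const uint8_t kTestEaxByte = 0xA9;

static bool HasInlinedLoadCode(const uint8_t* after_call_site) {
  return *after_call_site == kTestEaxByte;
}

int main() {
  const uint8_t nop_site[] = {0x90};            // The nop emitted above.
  return HasInlinedLoadCode(nop_site) ? 1 : 0;  // 0: nothing to patch.
}
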
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
- Comment cmnt(masm_, "[ CountOperation");
+void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+ LookupResult lookup;
+ receiver()->Lookup(*name, &lookup);
- // Expression can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->expression()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type = (prop->key()->context() == Expression::kUninitialized)
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
- Expression::kValue);
- } else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && expr->context() != Expression::kEffect) {
- ASSERT(expr->context() != Expression::kUninitialized);
- __ Push(Smi::FromInt(0));
- }
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- if (assign_type == NAMED_PROPERTY) {
- EmitNamedPropertyLoad(prop, Expression::kValue);
- } else {
- Visit(prop->key());
- ASSERT_EQ(Expression::kValue, prop->key()->context());
- EmitKeyedPropertyLoad(prop, Expression::kValue);
- }
- }
-
- // Convert to number.
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ ASSERT(lookup.holder() == *receiver());
+ ASSERT(lookup.type() == FIELD);
+ Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+ int index = lookup.GetFieldIndex() - map->inobject_properties();
+ int offset = index * kPointerSize;
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Do not save result.
- break;
- case Expression::kValue: // Fall through
- case Expression::kTest: // Fall through
- case Expression::kTestValue: // Fall through
- case Expression::kValueTest:
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(rax);
- break;
- case NAMED_PROPERTY:
- __ movq(Operand(rsp, kPointerSize), rax);
- break;
- case KEYED_PROPERTY:
- __ movq(Operand(rsp, 2 * kPointerSize), rax);
- break;
- }
- break;
- }
- }
-
- // Call runtime for +1/-1.
- __ push(rax);
- __ Push(Smi::FromInt(1));
- if (expr->op() == Token::INC) {
- __ CallRuntime(Runtime::kNumberAdd, 2);
+ // Negative offsets are inobject properties.
+ if (offset < 0) {
+ offset += map->instance_size();
+ __ movq(rcx, rdx); // Copy receiver for write barrier.
} else {
- __ CallRuntime(Runtime::kNumberSub, 2);
- }
-
- // Store the value returned in rax.
- switch (assign_type) {
- case VARIABLE:
- __ push(rax);
- if (expr->is_postfix()) {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Expression::kEffect);
- // For all contexts except kEffect: We have the result on
- // top of the stack.
- if (expr->context() != Expression::kEffect) {
- ApplyTOS(expr->context());
- }
- } else {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- expr->context());
- }
- break;
- case NAMED_PROPERTY: {
- __ Move(rcx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // This nop signals to the IC that there is no inlined code at the call
- // site for it to patch.
- __ nop();
- if (expr->is_postfix()) {
- __ Drop(1); // Result is on the stack under the receiver.
- if (expr->context() != Expression::kEffect) {
- ApplyTOS(expr->context());
- }
- } else {
- DropAndApply(1, expr->context(), rax);
- }
- break;
- }
- case KEYED_PROPERTY: {
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // This nop signals to the IC that there is no inlined code at the call
- // site for it to patch.
- __ nop();
- if (expr->is_postfix()) {
- __ Drop(2); // Result is on the stack under the key and the receiver.
- if (expr->context() != Expression::kEffect) {
- ApplyTOS(expr->context());
- }
- } else {
- DropAndApply(2, expr->context(), rax);
- }
- break;
- }
+ offset += FixedArray::kHeaderSize;
+ __ movq(rcx, FieldOperand(rdx, JSObject::kPropertiesOffset));
}
+ // Perform the store.
+ __ movq(FieldOperand(rcx, offset), rax);
+ // Preserve value from write barrier in case it's needed.
+ __ movq(rbx, rax);
+ __ RecordWrite(rcx, offset, rbx, rdi);
}
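
The offset arithmetic above is compact; the following is a standalone sketch of the same computation (illustrative instance and header sizes, not V8's actual Map or FixedArray layout):

    #include <cstdio>

    // Sketch of the field-offset computation above. kPointerSize matches
    // x64; instance_size and array_header_size are assumed example values.
    const int kPointerSize = 8;

    // Returns the byte offset at which field 'field_index' is stored, and
    // whether it lives inside the object or in the properties array.
    int FieldByteOffset(int field_index, int inobject_properties,
                        int instance_size, int array_header_size,
                        bool* in_object) {
      int index = field_index - inobject_properties;
      int offset = index * kPointerSize;
      if (offset < 0) {
        // Negative index: stored at the end of the fixed-size instance.
        *in_object = true;
        return instance_size + offset;
      }
      // Otherwise stored in the out-of-object properties array.
      *in_object = false;
      return array_header_size + offset;
    }

    int main() {
      bool in_object;
      // Assume 4 in-object slots, 48-byte instances, 16-byte array header.
      int off = FieldByteOffset(2, 4, 48, 16, &in_object);
      std::printf("field 2 -> offset %d, in-object %d\n", off, in_object);
      off = FieldByteOffset(6, 4, 48, 16, &in_object);
      std::printf("field 6 -> offset %d, in-object %d\n", off, in_object);
      return 0;
    }
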
-void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- Comment cmnt(masm_, "[ BinaryOperation");
- switch (expr->op()) {
- case Token::COMMA:
- ASSERT_EQ(Expression::kEffect, expr->left()->context());
- ASSERT_EQ(expr->context(), expr->right()->context());
- Visit(expr->left());
- Visit(expr->right());
- break;
-
- case Token::OR:
- case Token::AND:
- EmitLogicalOperation(expr);
- break;
-
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD:
- case Token::MUL:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- ASSERT_EQ(Expression::kValue, expr->left()->context());
- ASSERT_EQ(Expression::kValue, expr->right()->context());
-
- Visit(expr->left());
- Visit(expr->right());
- GenericBinaryOpStub stub(expr->op(),
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS);
- __ CallStub(&stub);
- Apply(expr->context(), rax);
-
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
- ASSERT_EQ(Expression::kValue, expr->left()->context());
- ASSERT_EQ(Expression::kValue, expr->right()->context());
- Visit(expr->left());
- Visit(expr->right());
- // Always perform the comparison for its control flow. Pack the result
- // into the expression's context after the comparison is performed.
- Label push_true, push_false, done;
- // Initially assume we are in a test context.
- Label* if_true = true_label_;
- Label* if_false = false_label_;
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kValue:
- if_true = &push_true;
- if_false = &push_false;
- break;
- case Expression::kEffect:
- if_true = &done;
- if_false = &done;
- break;
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- if_true = &push_true;
- break;
- case Expression::kTestValue:
- if_false = &push_false;
- break;
- }
-
- switch (expr->op()) {
- case Token::IN: {
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- __ CompareRoot(rax, Heap::kTrueValueRootIndex);
- __ j(equal, if_true);
- __ jmp(if_false);
- break;
- }
-
- case Token::INSTANCEOF: {
- InstanceofStub stub;
- __ CallStub(&stub);
- __ testq(rax, rax);
- __ j(zero, if_true); // The stub returns 0 for true.
- __ jmp(if_false);
- break;
- }
-
- default: {
- Condition cc = no_condition;
- bool strict = false;
- switch (expr->op()) {
- case Token::EQ_STRICT:
- strict = true;
- // Fall through.
- case Token::EQ:
- cc = equal;
- __ pop(rax);
- __ pop(rdx);
- break;
- case Token::LT:
- cc = less;
- __ pop(rax);
- __ pop(rdx);
- break;
- case Token::GT:
-        // Reverse left and right sides to obtain ECMA-262 conversion order.
- cc = less;
- __ pop(rdx);
- __ pop(rax);
- break;
- case Token::LTE:
-        // Reverse left and right sides to obtain ECMA-262 conversion order.
- cc = greater_equal;
- __ pop(rdx);
- __ pop(rax);
- break;
- case Token::GTE:
- cc = greater_equal;
- __ pop(rax);
- __ pop(rdx);
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
-
- // The comparison stub expects the smi vs. smi case to be handled
- // before it is called.
- Label slow_case;
- __ JumpIfNotBothSmi(rax, rdx, &slow_case);
- __ SmiCompare(rdx, rax);
- __ j(cc, if_true);
- __ jmp(if_false);
-
- __ bind(&slow_case);
- CompareStub stub(cc, strict);
- __ CallStub(&stub);
- __ testq(rax, rax);
- __ j(cc, if_true);
- __ jmp(if_false);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
-
- case Expression::kEffect:
- __ bind(&done);
- break;
-
- case Expression::kValue:
- __ bind(&push_true);
- __ PushRoot(Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(&push_false);
- __ PushRoot(Heap::kFalseValueRootIndex);
- __ bind(&done);
- break;
-
- case Expression::kTest:
- break;
-
- case Expression::kValueTest:
- __ bind(&push_true);
- __ PushRoot(Heap::kTrueValueRootIndex);
- __ jmp(true_label_);
- break;
-
- case Expression::kTestValue:
- __ bind(&push_false);
- __ PushRoot(Heap::kFalseValueRootIndex);
- __ jmp(false_label_);
- break;
- }
-}
-
-
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- Apply(expr->context(), rax);
-}
-
-
-Register FastCodeGenerator::result_register() { return rax; }
-
-
-Register FastCodeGenerator::context_register() { return rsi; }
-
-
-void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT(IsAligned(frame_offset, kPointerSize));
- __ movq(Operand(rbp, frame_offset), value);
-}
-
-
-void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ movq(dst, CodeGenerator::ContextOperand(rsi, context_index));
-}
+void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
+ ASSERT(function_ == NULL);
+ ASSERT(info_ == NULL);
+ function_ = fun;
+ info_ = info;
+ // Save the caller's frame pointer and set up our own.
+ Comment prologue_cmnt(masm(), ";; Prologue");
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ __ push(rsi); // Context.
+ __ push(rdi); // Closure.
+  // Note that we keep a live register reference to rsi (context) at this
+  // point.
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
+  // Receiver (this) is allocated to rdx if the function has this properties.
+ if (has_this_properties()) EmitReceiverMapCheck();
+ VisitStatements(fun->body());
-void FastCodeGenerator::EnterFinallyBlock() {
- ASSERT(!result_register().is(rdx));
- ASSERT(!result_register().is(rcx));
- // Cook return address on top of stack (smi encoded Code* delta)
- __ movq(rdx, Operand(rsp, 0));
- __ Move(rcx, masm_->CodeObject());
- __ subq(rdx, rcx);
- __ Integer32ToSmi(rdx, rdx);
- __ movq(Operand(rsp, 0), rdx);
- // Store result register while executing finally block.
- __ push(result_register());
-}
+ Comment return_cmnt(masm(), ";; Return(<undefined>)");
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ Comment epilogue_cmnt(masm(), ";; Epilogue");
+ __ movq(rsp, rbp);
+ __ pop(rbp);
+ __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
-void FastCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(rdx));
- ASSERT(!result_register().is(rcx));
- // Restore result register from stack.
- __ pop(result_register());
- // Uncook return address.
- __ movq(rdx, Operand(rsp, 0));
- __ SmiToInteger32(rdx, rdx);
- __ Move(rcx, masm_->CodeObject());
- __ addq(rdx, rcx);
- __ movq(Operand(rsp, 0), rdx);
- // And return.
- __ ret(0);
+ __ bind(&bailout_);
}
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
new file mode 100644
index 000000000..f5bbfafe6
--- /dev/null
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -0,0 +1,1907 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "compiler.h"
+#include "debug.h"
+#include "full-codegen.h"
+#include "parser.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right, with the
+// return address on top of them. The actual argument count matches the
+// formal parameter count expected by the function.
+//
+// The live registers are:
+//   o rdi: the JS function object being called (i.e., ourselves)
+// o rsi: our context
+// o rbp: our caller's frame pointer
+// o rsp: stack pointer (pointing to return address)
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-x64.h for its layout.
+void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
+ function_ = fun;
+ SetFunctionPosition(fun);
+
+ if (mode == PRIMARY) {
+ __ push(rbp); // Caller's frame pointer.
+ __ movq(rbp, rsp);
+ __ push(rsi); // Callee's context.
+ __ push(rdi); // Callee's JS Function.
+
+ { Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = fun->scope()->num_stack_slots();
+ if (locals_count == 1) {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ } else if (locals_count > 1) {
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ for (int i = 0; i < locals_count; i++) {
+ __ push(rdx);
+ }
+ }
+ }
+
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ if (fun->scope()->num_heap_slots() > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is still in rdi.
+ __ push(rdi);
+ __ CallRuntime(Runtime::kNewContext, 1);
+ function_in_register = false;
+      // The context is returned in both rax and rsi. It replaces the context
+      // passed to us, is saved on the stack, and is kept live in rsi.
+ __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+
+ // Copy any necessary parameters into the context.
+ int num_parameters = fun->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = fun->scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ movq(rax, Operand(rbp, parameter_offset));
+ // Store it in the context.
+ int context_offset = Context::SlotOffset(slot->index());
+ __ movq(Operand(rsi, context_offset), rax);
+          // Update the write barrier. This clobbers all involved
+          // registers, so we have to use a third register to avoid
+          // clobbering rsi.
+ __ movq(rcx, rsi);
+ __ RecordWrite(rcx, context_offset, rax, rbx);
+ }
+ }
+ }
+
+ // Possibly allocate an arguments object.
+ Variable* arguments = fun->scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Arguments object must be allocated after the context object, in
+ // case the "arguments" or ".arguments" variables are in the context.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (function_in_register) {
+ __ push(rdi);
+ } else {
+ __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ // The receiver is just before the parameters on the caller's stack.
+ __ lea(rdx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
+ fun->num_parameters() * kPointerSize));
+ __ push(rdx);
+ __ Push(Smi::FromInt(fun->num_parameters()));
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ // Store new arguments object in both "arguments" and ".arguments" slots.
+ __ movq(rcx, rax);
+ Move(arguments->slot(), rax, rbx, rdx);
+ Slot* dot_arguments_slot =
+ fun->scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, rcx, rbx, rdx);
+ }
+ }
+
+ { Comment cmnt(masm_, "[ Declarations");
+ VisitDeclarations(fun->scope()->declarations());
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ Label ok;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ bind(&ok);
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(fun->body());
+ ASSERT(loop_depth() == 0);
+ }
+
+ { Comment cmnt(masm_, "[ return <undefined>;");
+ // Emit a 'return undefined' in case control fell off the end of the body.
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ EmitReturnSequence(function_->end_position());
+ }
+}
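
For the parameter-copy loop in the prologue above, the rbp-relative offset arithmetic can be checked in isolation. This sketch uses stand-in constants rather than V8's StandardFrameConstants:

    #include <cstdio>

    // Illustrative stand-ins; kCallerSPOffset skips the saved rbp and the
    // return address that sit between rbp and the caller's arguments.
    const int kPointerSize = 8;
    const int kCallerSPOffset = 2 * kPointerSize;

    // Arguments are pushed left to right, so parameter i of n sits
    // (n - 1 - i) pointers above the caller's stack pointer.
    int ParameterOffset(int i, int num_parameters) {
      return kCallerSPOffset + (num_parameters - 1 - i) * kPointerSize;
    }

    int main() {
      for (int i = 0; i < 3; ++i) {
        std::printf("parameter %d lives at rbp + %d\n",
                    i, ParameterOffset(i, 3));
      }
      return 0;
    }
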
+
+
+void FullCodeGenerator::EmitReturnSequence(int position) {
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ jmp(&return_label_);
+ } else {
+ __ bind(&return_label_);
+ if (FLAG_trace) {
+ __ push(rax);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+#ifdef DEBUG
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+#endif
+ CodeGenerator::RecordPositions(masm_, position);
+ __ RecordJSReturn();
+ // Do not use the leave instruction here because it is too short to
+ // patch with the code required by the debugger.
+ __ movq(rsp, rbp);
+ __ pop(rbp);
+ __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Add padding that will be overwritten by a debugger breakpoint. We
+ // have just generated "movq rsp, rbp; pop rbp; ret k" with length 7
+ // (3 + 1 + 3).
+ const int kPadding = Assembler::kJSReturnSequenceLength - 7;
+ for (int i = 0; i < kPadding; ++i) {
+ masm_->int3();
+ }
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger.
+ ASSERT_EQ(Assembler::kJSReturnSequenceLength,
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+#endif
+ }
+}
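
The debugger-support block above hinges on one piece of arithmetic: the emitted epilogue is 7 bytes, and the remainder of the patchable region is filled with int3. A sketch of that computation, with an assumed total length standing in for Assembler::kJSReturnSequenceLength:

    #include <cstdio>

    int main() {
      const int kJSReturnSequenceLength = 13;  // assumed example value
      const int kMovqRspRbp = 3;               // movq rsp, rbp
      const int kPopRbp = 1;                   // pop rbp
      const int kRetImm16 = 3;                 // ret k
      int emitted = kMovqRspRbp + kPopRbp + kRetImm16;  // the 7 bytes above
      std::printf("int3 padding bytes: %d\n",
                  kJSReturnSequenceLength - emitted);
      return 0;
    }
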
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ // Nothing to do.
+ break;
+
+ case Expression::kValue:
+ // Move value into place.
+ switch (location_) {
+ case kAccumulator:
+ if (!reg.is(result_register())) __ movq(result_register(), reg);
+ break;
+ case kStack:
+ __ push(reg);
+ break;
+ }
+ break;
+
+ case Expression::kTest:
+ // For simplicity we always test the accumulator register.
+ if (!reg.is(result_register())) __ movq(result_register(), reg);
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ if (!reg.is(result_register())) __ movq(result_register(), reg);
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Nothing to do.
+ break;
+ case Expression::kValue: {
+ MemOperand slot_operand = EmitSlotSearch(slot, result_register());
+ switch (location_) {
+ case kAccumulator:
+ __ movq(result_register(), slot_operand);
+ break;
+ case kStack:
+ // Memory operands can be pushed directly.
+ __ push(slot_operand);
+ break;
+ }
+ break;
+ }
+
+ case Expression::kTest:
+ Move(result_register(), slot);
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ Move(result_register(), slot);
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Nothing to do.
+ break;
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ Move(result_register(), lit->handle());
+ break;
+ case kStack:
+ __ Push(lit->handle());
+ break;
+ }
+ break;
+
+ case Expression::kTest:
+ __ Move(result_register(), lit->handle());
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ __ Move(result_register(), lit->handle());
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::ApplyTOS(Expression::Context context) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ __ Drop(1);
+ break;
+
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ pop(result_register());
+ break;
+ case kStack:
+ break;
+ }
+ break;
+
+ case Expression::kTest:
+ __ pop(result_register());
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ __ pop(result_register());
+ break;
+ case kStack:
+ __ movq(result_register(), Operand(rsp, 0));
+ break;
+ }
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::DropAndApply(int count,
+ Expression::Context context,
+ Register reg) {
+ ASSERT(count > 0);
+ ASSERT(!reg.is(rsp));
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ __ Drop(count);
+ break;
+
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ Drop(count);
+ if (!reg.is(result_register())) __ movq(result_register(), reg);
+ break;
+ case kStack:
+ if (count > 1) __ Drop(count - 1);
+ __ movq(Operand(rsp, 0), reg);
+ break;
+ }
+ break;
+
+ case Expression::kTest:
+ __ Drop(count);
+ if (!reg.is(result_register())) __ movq(result_register(), reg);
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ __ Drop(count);
+ if (!reg.is(result_register())) __ movq(result_register(), reg);
+ break;
+ case kStack:
+ if (count > 1) __ Drop(count - 1);
+ __ movq(result_register(), reg);
+ __ movq(Operand(rsp, 0), result_register());
+ break;
+ }
+ DoTest(context);
+ break;
+ }
+}
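
DropAndApply's kValue/kStack arm has a subtle stack effect: it drops count - 1 items and then overwrites the new top with the result instead of pushing. A sketch of that effect on a modeled stack (a vector whose back() is the top of stack; illustrative only):

    #include <cstdio>
    #include <vector>

    // Models "__ Drop(count - 1); __ movq(Operand(rsp, 0), reg);".
    void DropAndReplaceTop(std::vector<int>* stack, int count, int value) {
      stack->resize(stack->size() - (count - 1));
      stack->back() = value;
    }

    int main() {
      std::vector<int> stack = {7, 42, 99};  // 99 is TOS
      DropAndReplaceTop(&stack, 2, 5);       // consume two, leave result
      std::printf("top %d, depth %zu\n", stack.back(), stack.size());
      return 0;
    }
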
+
+
+void FullCodeGenerator::Apply(Expression::Context context,
+ Label* materialize_true,
+ Label* materialize_false) {
+ switch (context) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+
+ case Expression::kEffect:
+ ASSERT_EQ(materialize_true, materialize_false);
+ __ bind(materialize_true);
+ break;
+
+ case Expression::kValue: {
+ Label done;
+ switch (location_) {
+ case kAccumulator:
+ __ bind(materialize_true);
+ __ Move(result_register(), Factory::true_value());
+ __ jmp(&done);
+ __ bind(materialize_false);
+ __ Move(result_register(), Factory::false_value());
+ break;
+ case kStack:
+ __ bind(materialize_true);
+ __ Push(Factory::true_value());
+ __ jmp(&done);
+ __ bind(materialize_false);
+ __ Push(Factory::false_value());
+ break;
+ }
+ __ bind(&done);
+ break;
+ }
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ __ bind(materialize_true);
+ switch (location_) {
+ case kAccumulator:
+ __ Move(result_register(), Factory::true_value());
+ break;
+ case kStack:
+ __ Push(Factory::true_value());
+ break;
+ }
+ __ jmp(true_label_);
+ break;
+
+ case Expression::kTestValue:
+ __ bind(materialize_false);
+ switch (location_) {
+ case kAccumulator:
+ __ Move(result_register(), Factory::false_value());
+ break;
+ case kStack:
+ __ Push(Factory::false_value());
+ break;
+ }
+ __ jmp(false_label_);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::DoTest(Expression::Context context) {
+ // The value to test is in the accumulator. If the value might be needed
+ // on the stack (value/test and test/value contexts with a stack location
+ // desired), then the value is already duplicated on the stack.
+ ASSERT_NE(NULL, true_label_);
+ ASSERT_NE(NULL, false_label_);
+
+ // In value/test and test/value expression contexts with stack as the
+ // desired location, there is already an extra value on the stack. Use a
+ // label to discard it if unneeded.
+ Label discard;
+ Label* if_true = true_label_;
+ Label* if_false = false_label_;
+ switch (context) {
+ case Expression::kUninitialized:
+ case Expression::kEffect:
+ case Expression::kValue:
+ UNREACHABLE();
+ case Expression::kTest:
+ break;
+ case Expression::kValueTest:
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ if_false = &discard;
+ break;
+ }
+ break;
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ if_true = &discard;
+ break;
+ }
+ break;
+ }
+
+ // Emit the inlined tests assumed by the stub.
+ __ CompareRoot(result_register(), Heap::kUndefinedValueRootIndex);
+ __ j(equal, if_false);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ j(equal, if_true);
+ __ CompareRoot(result_register(), Heap::kFalseValueRootIndex);
+ __ j(equal, if_false);
+ ASSERT_EQ(0, kSmiTag);
+ __ SmiCompare(result_register(), Smi::FromInt(0));
+ __ j(equal, if_false);
+ Condition is_smi = masm_->CheckSmi(result_register());
+ __ j(is_smi, if_true);
+
+ // Save a copy of the value if it may be needed and isn't already saved.
+ switch (context) {
+ case Expression::kUninitialized:
+ case Expression::kEffect:
+ case Expression::kValue:
+ UNREACHABLE();
+ case Expression::kTest:
+ break;
+ case Expression::kValueTest:
+ switch (location_) {
+ case kAccumulator:
+ __ push(result_register());
+ break;
+ case kStack:
+ break;
+ }
+ break;
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ __ push(result_register());
+ break;
+ case kStack:
+ break;
+ }
+ break;
+ }
+
+ // Call the ToBoolean stub for all other cases.
+ ToBooleanStub stub;
+ __ push(result_register());
+ __ CallStub(&stub);
+ __ testq(rax, rax);
+
+ // The stub returns nonzero for true. Complete based on the context.
+ switch (context) {
+ case Expression::kUninitialized:
+ case Expression::kEffect:
+ case Expression::kValue:
+ UNREACHABLE();
+
+ case Expression::kTest:
+ __ j(not_zero, true_label_);
+ __ jmp(false_label_);
+ break;
+
+ case Expression::kValueTest:
+ switch (location_) {
+ case kAccumulator:
+ __ j(zero, &discard);
+ __ pop(result_register());
+ __ jmp(true_label_);
+ break;
+ case kStack:
+ __ j(not_zero, true_label_);
+ break;
+ }
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(false_label_);
+ break;
+
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ __ j(not_zero, &discard);
+ __ pop(result_register());
+ __ jmp(false_label_);
+ break;
+ case kStack:
+ __ j(zero, false_label_);
+ break;
+ }
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(true_label_);
+ break;
+ }
+}
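
The inlined checks in DoTest implement a fast-path subset of ToBoolean before falling back to the stub. This standalone model covers the same fast paths using a toy tagged-value type, not V8's heap objects:

    #include <cstdio>

    enum FastPath { kKnownTrue, kKnownFalse, kNeedStub };

    struct Value {
      bool is_smi;
      long smi_value;                                          // if is_smi
      enum Kind { kUndef, kTrueObj, kFalseObj, kOther } kind;  // otherwise
    };

    // Same fast paths as the inlined tests above: undefined, true, false,
    // smi zero, any other smi; everything else goes to the ToBoolean stub.
    FastPath InlinedToBoolean(const Value& v) {
      if (!v.is_smi) {
        if (v.kind == Value::kUndef) return kKnownFalse;
        if (v.kind == Value::kTrueObj) return kKnownTrue;
        if (v.kind == Value::kFalseObj) return kKnownFalse;
        return kNeedStub;  // e.g. strings, heap numbers, other objects
      }
      return (v.smi_value == 0) ? kKnownFalse : kKnownTrue;
    }

    int main() {
      Value zero = {true, 0, Value::kOther};
      Value undef = {false, 0, Value::kUndef};
      std::printf("smi 0 -> %d, undefined -> %d\n",
                  InlinedToBoolean(zero), InlinedToBoolean(undef));
      return 0;
    }
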
+
+
+MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return Operand(rbp, SlotOffset(slot));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(slot->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return CodeGenerator::ContextOperand(scratch, slot->index());
+ }
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return Operand(rax, 0);
+}
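
For CONTEXT slots, EmitSlotSearch emits a walk up the context chain before indexing. Conceptually, with a toy linked-context model rather than V8's Context objects:

    #include <cstdio>

    struct Context {
      Context* previous;  // enclosing function's context
      int slots[8];       // context-allocated variables
    };

    // One hop per level of ContextChainLength, then index the slot; this
    // mirrors what LoadContext + ContextOperand compute above.
    int* SlotAddress(Context* current, int chain_length, int index) {
      for (int i = 0; i < chain_length; ++i) {
        current = current->previous;
      }
      return &current->slots[index];
    }

    int main() {
      Context outer = {nullptr, {10, 20, 30, 0, 0, 0, 0, 0}};
      Context inner = {&outer, {0}};
      std::printf("outer slot 2 via inner = %d\n",
                  *SlotAddress(&inner, 1, 2));
      return 0;
    }
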
+
+
+void FullCodeGenerator::Move(Register destination, Slot* source) {
+ MemOperand location = EmitSlotSearch(source, destination);
+ __ movq(destination, location);
+}
+
+
+void FullCodeGenerator::Move(Slot* dst,
+ Register src,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
+ ASSERT(!scratch1.is(src) && !scratch2.is(src));
+ MemOperand location = EmitSlotSearch(dst, scratch1);
+ __ movq(location, src);
+ // Emit the write barrier code if the location is in the heap.
+ if (dst->type() == Slot::CONTEXT) {
+ int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
+ __ RecordWrite(scratch1, offset, src, scratch2);
+ }
+}
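
The RecordWrite after the context store exists for the generational GC: stores that create old-to-new pointers must be remembered so the next scavenge can treat those slots as roots. A conceptual model of that bookkeeping, with fake address ranges and a plain set standing in for V8's actual remembered-set machinery:

    #include <cstdio>
    #include <cstdint>
    #include <unordered_set>

    const uintptr_t kNewSpaceStart = 0x10000;  // assumed fake boundaries
    const uintptr_t kNewSpaceEnd   = 0x20000;

    bool InNewSpace(uintptr_t addr) {
      return addr >= kNewSpaceStart && addr < kNewSpaceEnd;
    }

    std::unordered_set<uintptr_t> remembered_slots;

    // After storing 'value' at object + offset, remember the slot if an
    // old-space object now points into new space.
    void RecordWrite(uintptr_t object, int offset, uintptr_t value) {
      if (!InNewSpace(object) && InNewSpace(value)) {
        remembered_slots.insert(object + offset);
      }
    }

    int main() {
      RecordWrite(0x90000, 24, 0x10008);  // old object -> new value
      std::printf("remembered slots: %zu\n", remembered_slots.size());
      return 0;
    }
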
+
+
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+ Comment cmnt(masm_, "[ Declaration");
+ Variable* var = decl->proxy()->var();
+ ASSERT(var != NULL); // Must have been resolved.
+ Slot* slot = var->slot();
+ Property* prop = var->AsProperty();
+
+ if (slot != NULL) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+ __ movq(Operand(rbp, SlotOffset(slot)), kScratchRegister);
+ } else if (decl->fun() != NULL) {
+ VisitForValue(decl->fun(), kAccumulator);
+ __ movq(Operand(rbp, SlotOffset(slot)), result_register());
+ }
+ break;
+
+ case Slot::CONTEXT:
+ // We bypass the general EmitSlotSearch because we know more about
+ // this specific context.
+
+ // The variable in the decl always resides in the current context.
+ ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+ if (FLAG_debug_code) {
+ // Check if we have the correct context pointer.
+ __ movq(rbx,
+ CodeGenerator::ContextOperand(rsi, Context::FCONTEXT_INDEX));
+ __ cmpq(rbx, rsi);
+ __ Check(equal, "Unexpected declaration in current context.");
+ }
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+ __ movq(CodeGenerator::ContextOperand(rsi, slot->index()),
+ kScratchRegister);
+ // No write barrier since the hole value is in old space.
+ } else if (decl->fun() != NULL) {
+ VisitForValue(decl->fun(), kAccumulator);
+ __ movq(CodeGenerator::ContextOperand(rsi, slot->index()),
+ result_register());
+ int offset = Context::SlotOffset(slot->index());
+ __ movq(rbx, rsi);
+ __ RecordWrite(rbx, offset, result_register(), rcx);
+ }
+ break;
+
+ case Slot::LOOKUP: {
+ __ push(rsi);
+ __ Push(var->name());
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(decl->mode() == Variable::VAR ||
+ decl->mode() == Variable::CONST);
+ PropertyAttributes attr =
+ (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+ __ Push(Smi::FromInt(attr));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (decl->mode() == Variable::CONST) {
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ } else if (decl->fun() != NULL) {
+ VisitForValue(decl->fun(), kStack);
+ } else {
+ __ Push(Smi::FromInt(0)); // no initial value!
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+
+ } else if (prop != NULL) {
+ if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+ // We are declaring a function or constant that rewrites to a
+ // property. Use (keyed) IC to set the initial value.
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+
+ if (decl->fun() != NULL) {
+ VisitForValue(decl->fun(), kAccumulator);
+ } else {
+ __ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
+ }
+
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+      // The absence of a test rax instruction following the call signals
+      // to the IC that no code was inlined at the call site.
+ __ nop();
+
+ // Value in rax is ignored (declarations are statements). Receiver
+ // and key on stack are discarded.
+ __ Drop(2);
+ }
+ }
+}
+
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals.
+ __ push(rsi); // The context is the first argument.
+ __ Push(pairs);
+ __ Push(Smi::FromInt(is_eval_ ? 1 : 0));
+ __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+ Comment cmnt(masm_, "[ FunctionLiteral");
+
+ // Build the function boilerplate and instantiate it.
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(expr, script_, this);
+ if (HasStackOverflow()) return;
+
+ ASSERT(boilerplate->IsBoilerplate());
+
+ // Create a new closure.
+ __ push(rsi);
+ __ Push(boilerplate);
+ __ CallRuntime(Runtime::kNewClosure, 2);
+ Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ EmitVariableLoad(expr->var(), context_);
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(Variable* var,
+ Expression::Context context) {
+ // Four cases: non-this global variables, lookup slots, all other
+ // types of slots, and parameters that rewrite to explicit property
+ // accesses on the arguments object.
+ Slot* slot = var->slot();
+ Property* property = var->AsProperty();
+
+ if (var->is_global() && !var->is_this()) {
+ Comment cmnt(masm_, "Global variable");
+ // Use inline caching. Variable name is passed in rcx and the global
+ // object on the stack.
+ __ push(CodeGenerator::GlobalObject());
+ __ Move(rcx, var->name());
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ // A test rax instruction following the call is used by the IC to
+ // indicate that the inobject property case was inlined. Ensure there
+ // is no test rax instruction here.
+ __ nop();
+ DropAndApply(1, context, rax);
+
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ Comment cmnt(masm_, "Lookup slot");
+ __ push(rsi); // Context.
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ Apply(context, rax);
+
+ } else if (slot != NULL) {
+ Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
+ ? "Context slot"
+ : "Stack slot");
+ Apply(context, slot);
+
+ } else {
+ Comment cmnt(masm_, "Rewritten parameter");
+ ASSERT_NOT_NULL(property);
+ // Rewritten parameter accesses are of the form "slot[literal]".
+
+ // Assert that the object is in a slot.
+ Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(object_var);
+ Slot* object_slot = object_var->slot();
+ ASSERT_NOT_NULL(object_slot);
+
+ // Load the object.
+ MemOperand object_loc = EmitSlotSearch(object_slot, rax);
+ __ push(object_loc);
+
+ // Assert that the key is a smi.
+ Literal* key_literal = property->key()->AsLiteral();
+ ASSERT_NOT_NULL(key_literal);
+ ASSERT(key_literal->handle()->IsSmi());
+
+ // Load the key.
+ __ Push(key_literal->handle());
+
+ // Do a keyed property load.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Notice: We must not have a "test rax, ..." instruction after the
+ // call. It is treated specially by the LoadIC code.
+ __ nop();
+ // Drop key and object left on the stack by IC, and push the result.
+ DropAndApply(2, context, rax);
+ }
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ Label done;
+ // Registers will be used as follows:
+ // rdi = JS function.
+ // rbx = literals array.
+ // rax = regexp literal.
+ __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ movq(rax, FieldOperand(rbx, literal_offset));
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &done);
+  // Create the regexp literal using the runtime function.
+  // The result will be in rax.
+ __ push(rbx);
+ __ Push(Smi::FromInt(expr->literal_index()));
+ __ Push(expr->pattern());
+ __ Push(expr->flags());
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ bind(&done);
+ Apply(context_, rax);
+}
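
The undefined-check above is a caching pattern: the function's literals array memoizes the materialized regexp, so the runtime call runs once per literal site. A sketch with stand-in types (nullptr plays the role of the undefined sentinel):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct Regexp { std::string pattern; };  // stand-in for a JSRegExp

    Regexp* MaterializeRegExp(const std::string& pattern) {
      return new Regexp{pattern};  // stands in for the runtime call
    }

    Regexp* GetRegExpLiteral(std::vector<Regexp*>* literals, int index,
                             const std::string& pattern) {
      if ((*literals)[index] == nullptr) {   // the "undefined" check
        (*literals)[index] = MaterializeRegExp(pattern);
      }
      return (*literals)[index];
    }

    int main() {
      std::vector<Regexp*> literals(4, nullptr);
      Regexp* first = GetRegExpLiteral(&literals, 1, "ab+c");
      Regexp* second = GetRegExpLiteral(&literals, 1, "ab+c");
      std::printf("second lookup reused cache: %d\n", first == second);
      return 0;
    }
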
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+ __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ Push(Smi::FromInt(expr->literal_index()));
+ __ Push(expr->constant_properties());
+ if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
+ } else {
+ __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
+ }
+
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in rax.
+ bool result_saved = false;
+
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(rax); // Save result on the stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->handle()->IsSymbol()) {
+ VisitForValue(value, kAccumulator);
+ __ Move(rcx, key->handle());
+ __ movq(rdx, Operand(rsp, 0));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
+ break;
+ }
+ // Fall through.
+ case ObjectLiteral::Property::PROTOTYPE:
+ __ push(Operand(rsp, 0)); // Duplicate receiver.
+ VisitForValue(key, kStack);
+ VisitForValue(value, kStack);
+ __ CallRuntime(Runtime::kSetProperty, 3);
+ break;
+ case ObjectLiteral::Property::SETTER:
+ case ObjectLiteral::Property::GETTER:
+ __ push(Operand(rsp, 0)); // Duplicate receiver.
+ VisitForValue(key, kStack);
+ __ Push(property->kind() == ObjectLiteral::Property::SETTER ?
+ Smi::FromInt(1) :
+ Smi::FromInt(0));
+ VisitForValue(value, kStack);
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ break;
+ }
+ }
+
+ if (result_saved) {
+ ApplyTOS(context_);
+ } else {
+ Apply(context_, rax);
+ }
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+ __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Push(Smi::FromInt(expr->literal_index()));
+ __ Push(expr->constant_elements());
+ if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
+ } else {
+ __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ }
+
+ bool result_saved = false; // Is the result saved to the stack?
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ ZoneList<Expression*>* subexprs = expr->values();
+ for (int i = 0, len = subexprs->length(); i < len; i++) {
+ Expression* subexpr = subexprs->at(i);
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (subexpr->AsLiteral() != NULL ||
+ CompileTimeValue::IsCompileTimeValue(subexpr)) {
+ continue;
+ }
+
+ if (!result_saved) {
+ __ push(rax);
+ result_saved = true;
+ }
+ VisitForValue(subexpr, kAccumulator);
+
+ // Store the subexpression value in the array's elements.
+ __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
+ __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ movq(FieldOperand(rbx, offset), result_register());
+
+ // Update the write barrier for the array store.
+ __ RecordWrite(rbx, offset, result_register(), rcx);
+ }
+
+ if (result_saved) {
+ ApplyTOS(context_);
+ } else {
+ Apply(context_, rax);
+ }
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ __ Move(rcx, key->handle());
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+ Expression::Context context) {
+ __ push(result_register());
+ GenericBinaryOpStub stub(op,
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS);
+ __ CallStub(&stub);
+ Apply(context, rax);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+ Expression::Context context) {
+ // Three main cases: non-this global variables, lookup slots, and
+ // all other types of slots. Left-hand-side parameters that rewrite
+ // to explicit property accesses do not reach here.
+ ASSERT(var != NULL);
+ ASSERT(var->is_global() || var->slot() != NULL);
+ Slot* slot = var->slot();
+ if (var->is_global()) {
+ ASSERT(!var->is_this());
+ // Assignment to a global variable. Use inline caching for the
+ // assignment. Right-hand-side value is passed in rax, variable name in
+ // rcx, and the global object in rdx.
+ __ Move(rcx, var->name());
+ __ movq(rdx, CodeGenerator::GlobalObject());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ Apply(context, rax);
+
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ __ push(result_register()); // Value.
+ __ push(rsi); // Context.
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ Apply(context, rax);
+
+ } else if (var->slot() != NULL) {
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER:
+ __ movq(Operand(rbp, SlotOffset(slot)), result_register());
+ break;
+
+ case Slot::CONTEXT: {
+ MemOperand target = EmitSlotSearch(slot, rcx);
+ __ movq(target, result_register());
+
+ // RecordWrite may destroy all its register arguments.
+ __ movq(rdx, result_register());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ RecordWrite(rcx, offset, rdx, rbx);
+ break;
+ }
+
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ break;
+ }
+ Apply(context, result_register());
+
+ } else {
+ // Variables rewritten as properties are not treated as variables in
+ // assignments.
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ push(result_register());
+ __ push(Operand(rsp, kPointerSize)); // Receiver is now under value.
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
+ }
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ __ Move(rcx, prop->key()->AsLiteral()->handle());
+ if (expr->ends_initialization_block()) {
+ __ movq(rdx, Operand(rsp, 0));
+ } else {
+ __ pop(rdx);
+ }
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(rax); // Result of assignment, saved even if not needed.
+ __ push(Operand(rsp, kPointerSize)); // Receiver is under value.
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(rax);
+ DropAndApply(1, context_, rax);
+ } else {
+ Apply(context_, rax);
+ }
+}
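
The ToSlowProperties/ToFastProperties dance above is about asymptotics: under an assumed simplified cost model where each add to a fast-mode object copies its existing entries, n adds do O(n^2) work, while dictionary ("slow") mode pays roughly constant work per add. A toy count:

    #include <cstdio>

    int main() {
      const int n = 16;
      int fast_work = 0;
      for (int i = 1; i <= n; ++i) {
        fast_work += i;  // assume the i-th add copies i existing entries
      }
      std::printf("fast: %d entry copies, slow: ~%d inserts\n",
                  fast_work, n);
      return 0;
    }
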
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a keyed store IC.
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ push(result_register());
+ // Receiver is now under the key and value.
+ __ push(Operand(rsp, 2 * kPointerSize));
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
+ }
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(rax); // Result of assignment, saved even if not needed.
+ // Receiver is under the key and value.
+ __ push(Operand(rsp, 2 * kPointerSize));
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(rax);
+ }
+
+ // Receiver and key are still on stack.
+ DropAndApply(2, context_, rax);
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+
+ // Evaluate receiver.
+ VisitForValue(expr->obj(), kStack);
+
+ if (key->IsPropertyName()) {
+ EmitNamedPropertyLoad(expr);
+ // Drop receiver left on the stack by IC.
+ DropAndApply(1, context_, rax);
+ } else {
+ VisitForValue(expr->key(), kStack);
+ EmitKeyedPropertyLoad(expr);
+ // Drop key and receiver left on the stack by IC.
+ DropAndApply(2, context_, rax);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallWithIC(Call* expr,
+ Handle<Object> ignored,
+ RelocInfo::Mode mode) {
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ // Call the IC initialization code.
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ in_loop);
+ __ Call(ic, mode);
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndApply(1, context_, rax);
+}
+
+
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+ // Code common for calls using the call stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+ __ CallStub(&stub);
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndApply(1, context_, rax);
+}
+
+
+void FullCodeGenerator::VisitCall(Call* expr) {
+ Comment cmnt(masm_, "[ Call");
+ Expression* fun = expr->expression();
+ Variable* var = fun->AsVariableProxy()->AsVariable();
+
+ if (var != NULL && var->is_possibly_eval()) {
+ // Call to the identifier 'eval'.
+ UNREACHABLE();
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // Call to a global variable.
+ __ Push(var->name());
+ // Push global object as receiver for the call IC lookup.
+ __ push(CodeGenerator::GlobalObject());
+ EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ // Call to a lookup slot.
+ UNREACHABLE();
+ } else if (fun->AsProperty() != NULL) {
+ // Call to an object property.
+ Property* prop = fun->AsProperty();
+ Literal* key = prop->key()->AsLiteral();
+ if (key != NULL && key->handle()->IsSymbol()) {
+ // Call to a named property, use call IC.
+ __ Push(key->handle());
+ VisitForValue(prop->obj(), kStack);
+ EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
+ } else {
+ // Call to a keyed property, use keyed load IC followed by function
+ // call.
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+      // By emitting a nop we make sure that we do not have a "test rax,..."
+      // instruction after the call, as it is treated specially by the
+      // LoadIC code.
+ __ nop();
+ // Drop key left on the stack by IC.
+ __ Drop(1);
+ // Pop receiver.
+ __ pop(rbx);
+ // Push result (function).
+ __ push(rax);
+ // Push receiver object on stack.
+ if (prop->is_synthetic()) {
+ __ movq(rcx, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
+ } else {
+ __ push(rbx);
+ }
+ EmitCallWithStub(expr);
+ }
+ } else {
+    // Call to some other expression. If the expression is an anonymous
+    // function literal not called in a loop, mark it as one that should
+    // also try the full code generator.
+ FunctionLiteral* lit = fun->AsFunctionLiteral();
+ if (lit != NULL &&
+ lit->name()->Equals(Heap::empty_string()) &&
+ loop_depth() == 0) {
+ lit->set_try_full_codegen(true);
+ }
+ VisitForValue(fun, kStack);
+ // Load global receiver object.
+ __ movq(rbx, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ // Emit function call.
+ EmitCallWithStub(expr);
+ }
+}
+
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+ // Push function on the stack.
+ VisitForValue(expr->expression(), kStack);
+
+ // Push global object (receiver).
+ __ push(CodeGenerator::GlobalObject());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function, arg_count into rdi and rax.
+ __ Set(rax, arg_count);
+ // Function is in rsp[arg_count + 1].
+ __ movq(rdi, Operand(rsp, rax, times_pointer_size, kPointerSize));
+
+ Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+ // Replace function on TOS with result in rax, or pop it.
+ DropAndApply(1, context_, rax);
+}
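
The rdi load above indexes past the arguments and the receiver to find the function that was pushed first. The scaled-operand arithmetic, checked in isolation:

    #include <cstdio>

    int main() {
      const int kPointerSize = 8;
      int arg_count = 3;  // held in rax when the operand is evaluated
      // Operand(rsp, rax, times_pointer_size, kPointerSize) computes
      // rsp + arg_count * 8 + 8: past the arguments and the receiver,
      // landing on the function.
      int byte_offset = arg_count * kPointerSize + kPointerSize;
      std::printf("function at rsp + %d bytes (slot rsp[%d])\n",
                  byte_offset, arg_count + 1);
      return 0;
    }
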
+
+
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ Comment cmnt(masm_, "[ CallRuntime");
+ ZoneList<Expression*>* args = expr->arguments();
+
+ if (expr->is_jsruntime()) {
+ // Prepare for calling JS runtime function.
+ __ Push(expr->name());
+ __ movq(rax, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
+ }
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+
+ if (expr->is_jsruntime()) {
+ // Call the JS runtime function.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndApply(1, context_, rax);
+ } else {
+ __ CallRuntime(expr->function(), arg_count);
+ Apply(context_, rax);
+ }
+}
+
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ VisitForEffect(expr->expression());
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
+ break;
+ case kStack:
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ break;
+ }
+ break;
+ case Expression::kTestValue:
+        // The value (undefined) is falsy, so it is needed on the false
+        // branch of a test-value context.
+ switch (location_) {
+ case kAccumulator:
+ __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
+ break;
+ case kStack:
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ break;
+ }
+ // Fall through.
+ case Expression::kTest:
+ case Expression::kValueTest:
+ __ jmp(false_label_);
+ break;
+ }
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ Label materialize_true, materialize_false, done;
+ // Initially assume a pure test context. Notice that the labels are
+ // swapped.
+ Label* if_true = false_label_;
+ Label* if_false = true_label_;
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ if_true = &done;
+ if_false = &done;
+ break;
+ case Expression::kValue:
+ if_true = &materialize_false;
+ if_false = &materialize_true;
+ break;
+ case Expression::kTest:
+ break;
+ case Expression::kValueTest:
+ if_false = &materialize_true;
+ break;
+ case Expression::kTestValue:
+ if_true = &materialize_false;
+ break;
+ }
+ VisitForControl(expr->expression(), if_true, if_false);
+ Apply(context_, if_false, if_true); // Labels swapped.
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy != NULL &&
+ !proxy->var()->is_this() &&
+ proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ push(CodeGenerator::GlobalObject());
+ __ Move(rcx, proxy->name());
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ __ movq(Operand(rsp, 0), rax);
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
+ __ push(rsi);
+ __ Push(proxy->name());
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ push(rax);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitForValue(expr->expression(), kStack);
+ }
+
+ __ CallRuntime(Runtime::kTypeof, 1);
+ Apply(context_, rax);
+ break;
+ }
+
+ case Token::ADD: {
+ Comment cmt(masm_, "[ UnaryOperation (ADD)");
+ VisitForValue(expr->expression(), kAccumulator);
+ Label no_conversion;
+ Condition is_smi = masm_->CheckSmi(result_register());
+ __ j(is_smi, &no_conversion);
+ __ push(result_register());
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ bind(&no_conversion);
+ Apply(context_, result_register());
+ break;
+ }
+
+ case Token::SUB: {
+ Comment cmt(masm_, "[ UnaryOperation (SUB)");
+ bool overwrite =
+ (expr->expression()->AsBinaryOperation() != NULL &&
+ expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericUnaryOpStub stub(Token::SUB, overwrite);
+ // GenericUnaryOpStub expects the argument to be in the
+ // accumulator register rax.
+ VisitForValue(expr->expression(), kAccumulator);
+ __ CallStub(&stub);
+ Apply(context_, rax);
+ break;
+ }
+
+ case Token::BIT_NOT: {
+ Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
+ bool overwrite =
+ (expr->expression()->AsBinaryOperation() != NULL &&
+ expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ // GenericUnaryOpStub expects the argument to be in the
+ // accumulator register rax.
+ VisitForValue(expr->expression(), kAccumulator);
+ // Avoid calling the stub for Smis.
+ Label smi, done;
+ Condition is_smi = masm_->CheckSmi(result_register());
+ __ j(is_smi, &smi);
+ // Non-smi: call stub leaving result in accumulator register.
+ __ CallStub(&stub);
+ __ jmp(&done);
+ // Perform operation directly on Smis.
+ __ bind(&smi);
+ __ SmiNot(result_register(), result_register());
+ __ bind(&done);
+ Apply(context_, result_register());
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ Comment cmnt(masm_, "[ CountOperation");
+
+ // Expression can only be a property, a global or a (parameter or local)
+ // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+ // In case of a property we use the uninitialized expression context
+ // of the key to detect a named property.
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ Location saved_location = location_;
+ location_ = kAccumulator;
+ EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
+ Expression::kValue);
+ location_ = saved_location;
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && context_ != Expression::kEffect) {
+ __ Push(Smi::FromInt(0));
+ }
+ VisitForValue(prop->obj(), kStack);
+ if (assign_type == NAMED_PROPERTY) {
+ EmitNamedPropertyLoad(prop);
+ } else {
+ VisitForValue(prop->key(), kStack);
+ EmitKeyedPropertyLoad(prop);
+ }
+ }
+
+ // Call ToNumber only if operand is not a smi.
+ Label no_conversion;
+ Condition is_smi;
+ is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, &no_conversion);
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ bind(&no_conversion);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Do not save result.
+ break;
+ case Expression::kValue:
+ case Expression::kTest:
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(rax);
+ break;
+ case NAMED_PROPERTY:
+ __ movq(Operand(rsp, kPointerSize), rax);
+ break;
+ case KEYED_PROPERTY:
+ __ movq(Operand(rsp, 2 * kPointerSize), rax);
+ break;
+ }
+ break;
+ }
+ }
+
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ if (loop_depth() > 0) {
+ if (expr->op() == Token::INC) {
+ __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+ } else {
+ __ SmiSubConstant(rax, rax, Smi::FromInt(1));
+ }
+ __ j(overflow, &stub_call);
+ // We could eliminate this smi check if we split the code at
+ // the first smi check before calling ToNumber.
+ is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, &done);
+ __ bind(&stub_call);
+ // Call stub. Undo operation first.
+ if (expr->op() == Token::INC) {
+ __ SmiSubConstant(rax, rax, Smi::FromInt(1));
+ } else {
+ __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+ }
+ }
+ // Call stub for +1/-1.
+ GenericBinaryOpStub stub(expr->binary_op(),
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS);
+ stub.GenerateCall(masm_, rax, Smi::FromInt(1));
+ __ bind(&done);
+
+ // Store the value returned in rax.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Expression::kEffect);
+ // For all contexts except kEffect: We have the result on
+ // top of the stack.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ context_);
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ Move(rcx, prop->key()->AsLiteral()->handle());
+ __ pop(rdx);
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+ if (expr->is_postfix()) {
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ Apply(context_, rax);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+ if (expr->is_postfix()) {
+ __ Drop(2); // Result is on the stack under the key and the receiver.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ DropAndApply(2, context_, rax);
+ }
+ break;
+ }
+ }
+}
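
The inline count fast path reads more easily as scalar code. A sketch under
the same 1-bit-tag assumption as above, where Smi::FromInt(1) is the machine
integer 2 and overflow behaves like ordinary two's-complement overflow (the
helper name is ours, not V8's; __builtin_add_overflow is the GCC/Clang
builtin):

    #include <cstdint>

    // Returns true and writes the new value if +1/-1 stayed a smi; returns
    // false with the operand unchanged, mirroring how the generated code
    // undoes SmiAddConstant/SmiSubConstant before falling back to the stub.
    bool inline_count_op(int32_t tagged, bool increment, int32_t* out) {
      int32_t delta = increment ? 2 : -2;
      int32_t result;
      if (__builtin_add_overflow(tagged, delta, &result)) return false;
      *out = result;
      return true;
    }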
+
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+ Comment cmnt(masm_, "[ BinaryOperation");
+ switch (expr->op()) {
+ case Token::COMMA:
+ VisitForEffect(expr->left());
+ Visit(expr->right());
+ break;
+
+ case Token::OR:
+ case Token::AND:
+ EmitLogicalOperation(expr);
+ break;
+
+ case Token::ADD:
+ case Token::SUB:
+ case Token::DIV:
+ case Token::MOD:
+ case Token::MUL:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+ VisitForValue(expr->left(), kStack);
+ VisitForValue(expr->right(), kAccumulator);
+ EmitBinaryOp(expr->op(), context_);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+ Label materialize_true, materialize_false, done;
+ // Initially assume we are in a test context.
+ Label* if_true = true_label_;
+ Label* if_false = false_label_;
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ if_true = &done;
+ if_false = &done;
+ break;
+ case Expression::kValue:
+ if_true = &materialize_true;
+ if_false = &materialize_false;
+ break;
+ case Expression::kTest:
+ break;
+ case Expression::kValueTest:
+ if_true = &materialize_true;
+ break;
+ case Expression::kTestValue:
+ if_false = &materialize_false;
+ break;
+ }
+
+ VisitForValue(expr->left(), kStack);
+ switch (expr->op()) {
+ case Token::IN:
+ VisitForValue(expr->right(), kStack);
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ __ CompareRoot(rax, Heap::kTrueValueRootIndex);
+ __ j(equal, if_true);
+ __ jmp(if_false);
+ break;
+
+ case Token::INSTANCEOF: {
+ VisitForValue(expr->right(), kStack);
+ InstanceofStub stub;
+ __ CallStub(&stub);
+ __ testq(rax, rax);
+ __ j(zero, if_true); // The stub returns 0 for true.
+ __ jmp(if_false);
+ break;
+ }
+
+ default: {
+ VisitForValue(expr->right(), kAccumulator);
+ Condition cc = no_condition;
+ bool strict = false;
+ switch (expr->op()) {
+ case Token::EQ_STRICT:
+ strict = true;
+ // Fall through.
+ case Token::EQ:
+ cc = equal;
+ __ pop(rdx);
+ break;
+ case Token::LT:
+ cc = less;
+ __ pop(rdx);
+ break;
+ case Token::GT:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = less;
+ __ movq(rdx, result_register());
+ __ pop(rax);
+ break;
+ case Token::LTE:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = greater_equal;
+ __ movq(rdx, result_register());
+ __ pop(rax);
+ break;
+ case Token::GTE:
+ cc = greater_equal;
+ __ pop(rdx);
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+
+ // The comparison stub expects the smi vs. smi case to be handled
+ // before it is called.
+ Label slow_case;
+ __ JumpIfNotBothSmi(rax, rdx, &slow_case);
+ __ SmiCompare(rdx, rax);
+ __ j(cc, if_true);
+ __ jmp(if_false);
+
+ __ bind(&slow_case);
+ CompareStub stub(cc, strict);
+ __ CallStub(&stub);
+ __ testq(rax, rax);
+ __ j(cc, if_true);
+ __ jmp(if_false);
+ }
+ }
+
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ Apply(context_, if_true, if_false);
+}
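
The GT and LTE cases swap operands rather than introduce new conditions:
ECMA-262 requires the left operand to be converted before the right, so the
comparison itself is reversed instead of the evaluation order. The
equivalences, as a self-checking sketch (illustrative only):

    #include <cassert>

    bool lt(double a, double b) { return a < b; }
    bool ge(double a, double b) { return a >= b; }

    // a > b compiles as b < a; a <= b compiles as b >= a. Only the
    // registers are exchanged, never the order of evaluation/conversion.
    bool gt(double a, double b)  { return lt(b, a); }
    bool lte(double a, double b) { return ge(b, a); }

    int main() { assert(gt(2, 1) && lte(1, 1) && !gt(1, 2)); }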
+
+
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ Apply(context_, rax);
+}
+
+
+Register FullCodeGenerator::result_register() { return rax; }
+
+
+Register FullCodeGenerator::context_register() { return rsi; }
+
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ ASSERT(IsAligned(frame_offset, kPointerSize));
+ __ movq(Operand(rbp, frame_offset), value);
+}
+
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ movq(dst, CodeGenerator::ContextOperand(rsi, context_index));
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+
+void FullCodeGenerator::EnterFinallyBlock() {
+ ASSERT(!result_register().is(rdx));
+ ASSERT(!result_register().is(rcx));
+ // Cook return address on top of stack (smi encoded Code* delta)
+ __ movq(rdx, Operand(rsp, 0));
+ __ Move(rcx, masm_->CodeObject());
+ __ subq(rdx, rcx);
+ __ Integer32ToSmi(rdx, rdx);
+ __ movq(Operand(rsp, 0), rdx);
+ // Store result register while executing finally block.
+ __ push(result_register());
+}
+
+
+void FullCodeGenerator::ExitFinallyBlock() {
+ ASSERT(!result_register().is(rdx));
+ ASSERT(!result_register().is(rcx));
+ // Restore result register from stack.
+ __ pop(result_register());
+ // Uncook return address.
+ __ movq(rdx, Operand(rsp, 0));
+ __ SmiToInteger32(rdx, rdx);
+ __ Move(rcx, masm_->CodeObject());
+ __ addq(rdx, rcx);
+ __ movq(Operand(rsp, 0), rdx);
+ // And return.
+ __ ret(0);
+}
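
Cooking replaces the absolute return address with a smi-encoded offset from
the start of the code object, so the address stays valid even if a GC moves
the code while the finally block runs. The arithmetic, sketched under a
1-bit smi tag (illustrative only):

    #include <cstdint>

    intptr_t cook(intptr_t return_address, intptr_t code_start) {
      return (return_address - code_start) << 1;   // delta, smi-encoded
    }

    intptr_t uncook(intptr_t cooked, intptr_t code_start) {
      return (cooked >> 1) + code_start;           // decode, re-add new base
    }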
+
+
+#undef __
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index a0f87ad4a..28bfd2ee6 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -271,11 +271,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
__ j(below, &slow);
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks. The map is already in rdx.
+
+ // Check bit field.
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
+ Immediate(kSlowCaseBitFieldMask));
__ j(not_zero, &slow);
// Check that the key is a smi.
@@ -415,6 +414,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
+
GenerateGeneric(masm);
}
@@ -982,7 +982,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// rsp[16] argument argc - 1
// ...
// rsp[argc * 8] argument 1
- // rsp[(argc + 1) * 8] argument 0 = reciever
+ // rsp[(argc + 1) * 8] argument 0 = receiver
// rsp[(argc + 2) * 8] function name
// -----------------------------------
Label number, non_number, non_string, boolean, probe, miss;
@@ -1078,10 +1078,9 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// rsp[16] argument argc - 1
// ...
// rsp[argc * 8] argument 1
- // rsp[(argc + 1) * 8] argument 0 = reciever
+ // rsp[(argc + 1) * 8] argument 0 = receiver
// rsp[(argc + 2) * 8] function name
// -----------------------------------
-
Label miss, global_object, non_global_object;
// Get the receiver of the function from the stack.
@@ -1179,7 +1178,6 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
-
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
@@ -1196,7 +1194,6 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
-
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
@@ -1244,7 +1241,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
-
Label miss, probe, global;
__ movq(rax, Operand(rsp, kPointerSize));
@@ -1292,7 +1288,6 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
-
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
@@ -1331,33 +1326,34 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
return true;
}
-void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
+
__ pop(rbx);
- __ push(Operand(rsp, 0)); // receiver
+ __ push(rdx); // receiver
__ push(rcx); // name
__ push(rax); // value
__ push(rbx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(f, 3, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
}
void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : Map (target of map transition)
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
__ pop(rbx);
- __ push(Operand(rsp, 0)); // receiver
+ __ push(rdx); // receiver
__ push(rcx); // transition map
__ push(rax); // value
__ push(rbx); // return address
@@ -1371,19 +1367,18 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- __ movq(rdx, Operand(rsp, kPointerSize));
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC);
StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
// Cache miss: Jump to runtime.
- Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+ GenerateMiss(masm);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index dc77c8948..96b45e842 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -178,6 +178,11 @@ void MacroAssembler::RecordWrite(Register object,
int offset,
Register value,
Register smi_index) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are rsi.
+ ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi));
+
// First, check if a remembered set write is even needed. The tests below
// catch stores of Smis and stores into young gen (which does not have space
// for the remembered set bits).
@@ -186,6 +191,17 @@ void MacroAssembler::RecordWrite(Register object,
RecordWriteNonSmi(object, offset, value, smi_index);
bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors. This clobbering repeats the
+ // clobbering done inside RecordWriteNonSmi but it's necessary to
+ // avoid having the fast case for smis leave the registers
+ // unchanged.
+ if (FLAG_debug_code) {
+ movq(object, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(value, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(smi_index, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ }
}
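
The debug-mode clobbering is a fail-fast device: after any write-barrier call
in a --debug-code build, every input register holds a distinctive garbage
constant, so code that wrongly assumes those registers survive the call
breaks deterministically. The pattern, with a made-up stand-in for V8's
kZapValue (whose exact bit pattern is not reproduced here):

    #include <cstdint>

    constexpr uint64_t kIllustrativeZap = 0xDEADBEEFDEADBEEFull;

    // Poison every input register so stale uses fail loudly.
    void zap_inputs(uint64_t* regs, int count) {
      for (int i = 0; i < count; ++i) regs[i] = kIllustrativeZap;
    }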
@@ -194,6 +210,14 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
Register scratch,
Register smi_index) {
Label done;
+
+ if (FLAG_debug_code) {
+ Label okay;
+ JumpIfNotSmi(object, &okay);
+ Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
+ bind(&okay);
+ }
+
// Test that the object address is not in the new space. We cannot
// set remembered set bits in the new space.
movq(scratch, object);
@@ -243,6 +267,14 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
}
bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ movq(object, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(scratch, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(smi_index, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ }
}
@@ -344,10 +376,14 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
return;
}
- Runtime::FunctionId function_id =
- static_cast<Runtime::FunctionId>(f->stub_id);
- RuntimeStub stub(function_id, num_arguments);
- CallStub(&stub);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ movq(rax, Immediate(num_arguments));
+ movq(rbx, ExternalReference(f));
+ CEntryStub ces(f->result_size);
+ CallStub(&ces);
}
@@ -581,6 +617,31 @@ Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
}
+Condition MacroAssembler::CheckBothPositiveSmi(Register first,
+ Register second) {
+ if (first.is(second)) {
+ return CheckPositiveSmi(first);
+ }
+ movl(kScratchRegister, first);
+ orl(kScratchRegister, second);
+ rol(kScratchRegister, Immediate(1));
+ testl(kScratchRegister, Immediate(0x03));
+ return zero;
+}
+
+
+Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
+ if (first.is(second)) {
+ return CheckSmi(first);
+ }
+ movl(kScratchRegister, first);
+ andl(kScratchRegister, second);
+ testb(kScratchRegister, Immediate(kSmiTagMask));
+ return zero;
+}
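
Both new predicates fold two checks into one ALU operation. Assuming the
ia32-style tags visible in this code (smi tag bit 0 is 0, heap objects carry
1, the sign sits in the top bit), the tricks are:

    #include <cstdint>

    // CheckBothPositiveSmi: OR the words, rotate left one bit so the sign
    // bit lands next to the tag bit, then test the low two bits at once.
    bool both_positive_smis(uint32_t a, uint32_t b) {
      uint32_t x = a | b;
      x = (x << 1) | (x >> 31);   // rol x, 1
      return (x & 0x3) == 0;      // tag and sign both clear for both values
    }

    // CheckEitherSmi: AND the words; the result's tag bit is 1 only when
    // both values are heap objects, so a clear tag bit means "either smi".
    bool either_smi(uint32_t a, uint32_t b) {
      return ((a & b) & 1) == 0;  // kSmiTagMask == 1 assumed
    }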
+
+
Condition MacroAssembler::CheckIsMinSmi(Register src) {
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
movq(kScratchRegister, src);
@@ -649,7 +710,17 @@ void MacroAssembler::SmiSub(Register dst,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
+ if (on_not_smi_result == NULL) {
+ // No overflow checking. Use only when it's known that
+ // overflowing is impossible (e.g., subtracting two positive smis).
+ if (dst.is(src1)) {
+ subq(dst, src2);
+ } else {
+ movq(dst, src1);
+ subq(dst, src2);
+ }
+ Assert(no_overflow, "Smi subtraction overflow");
+ } else if (dst.is(src1)) {
subq(dst, src2);
Label smi_result;
j(no_overflow, &smi_result);
@@ -1281,6 +1352,46 @@ void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
}
+void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1, Register src2,
+ Label* on_not_both_smi) {
+ Condition both_smi = CheckBothPositiveSmi(src1, src2);
+ j(NegateCondition(both_smi), on_not_both_smi);
+}
+
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
+ Register second_object,
+ Register scratch1,
+ Register scratch2,
+ Label* on_fail) {
+ // Check that both objects are not smis.
+ Condition either_smi = CheckEitherSmi(first_object, second_object);
+ j(either_smi, on_fail);
+
+ // Load instance type for both strings.
+ movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+ movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+ movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+ movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ascii strings.
+ ASSERT(kNotStringTag != 0);
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+ andl(scratch1, Immediate(kFlatAsciiStringMask));
+ andl(scratch2, Immediate(kFlatAsciiStringMask));
+ // Interleave the bits to check both scratch1 and scratch2 in one test.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmpl(scratch1,
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ j(not_equal, on_fail);
+}
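
The LEA packs both masked instance types into one word so a single compare
validates the two strings together. A scalar rendering using the same
constants (illustrative only):

    // Requires the property checked by the ASSERT_EQ above: the mask must
    // not overlap a copy of itself shifted left by three bits.
    bool both_flat_ascii(int type1, int type2,
                         int kFlatAsciiStringMask, int kFlatAsciiStringTag) {
      int combined = (type1 & kFlatAsciiStringMask)
                   + ((type2 & kFlatAsciiStringMask) << 3);   // the lea
      return combined == kFlatAsciiStringTag + (kFlatAsciiStringTag << 3);
    }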
+
+
void MacroAssembler::Move(Register dst, Handle<Object> source) {
ASSERT(!source->IsFailure());
if (source->IsSmi()) {
@@ -1474,6 +1585,17 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
}
+Condition MacroAssembler::IsObjectStringType(Register heap_object,
+ Register map,
+ Register instance_type) {
+ movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ ASSERT(kNotStringTag != 0);
+ testb(instance_type, Immediate(kIsNotStringMask));
+ return zero;
+}
+
+
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Label* miss) {
@@ -2387,6 +2509,51 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
+int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
+ // On Windows stack slots are reserved by the caller for all arguments
+ // including the ones passed in registers. On Linux 6 arguments are passed in
+ // registers and the caller does not reserve stack slots for them.
+ ASSERT(num_arguments >= 0);
+#ifdef _WIN64
+ static const int kArgumentsWithoutStackSlot = 0;
+#else
+ static const int kArgumentsWithoutStackSlot = 6;
+#endif
+ return num_arguments > kArgumentsWithoutStackSlot ?
+ num_arguments - kArgumentsWithoutStackSlot : 0;
+}
+
+
+void MacroAssembler::PrepareCallCFunction(int num_arguments) {
+ int frame_alignment = OS::ActivationFrameAlignment();
+ ASSERT(frame_alignment != 0);
+ ASSERT(num_arguments >= 0);
+ // Make stack end at alignment and allocate space for arguments and old rsp.
+ movq(kScratchRegister, rsp);
+ ASSERT(IsPowerOf2(frame_alignment));
+ int argument_slots_on_stack =
+ ArgumentStackSlotsForCFunctionCall(num_arguments);
+ subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
+ and_(rsp, Immediate(-frame_alignment));
+ movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ movq(rax, function);
+ CallCFunction(rax, num_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+ call(function);
+ ASSERT(OS::ActivationFrameAlignment() != 0);
+ ASSERT(num_arguments >= 0);
+ int argument_slots_on_stack =
+ ArgumentStackSlotsForCFunctionCall(num_arguments);
+ movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
+}
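
PrepareCallCFunction and CallCFunction cooperate through a single saved word:
the pre-call rsp is parked in the slot just above the outgoing arguments,
and the post-call code reloads it from that same slot. In pointer
arithmetic, assuming 8-byte slots (illustrative only):

    #include <cstdint>

    uintptr_t prepare_call(uintptr_t rsp, int arg_slots, uintptr_t align) {
      uintptr_t saved = rsp;                    // the kScratchRegister copy
      rsp -= (arg_slots + 1) * 8;               // args plus the saved-rsp slot
      rsp &= ~(align - 1);                      // align the frame downwards
      *reinterpret_cast<uintptr_t*>(rsp + arg_slots * 8) = saved;
      return rsp;
    }

    uintptr_t finish_call(uintptr_t rsp, int arg_slots) {
      // CallCFunction's epilogue: reload the saved rsp from above the args.
      return *reinterpret_cast<uintptr_t*>(rsp + arg_slots * 8);
    }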
+
CodePatcher::CodePatcher(byte* address, int size)
: address_(address), size_(size), masm_(address, size + Assembler::kGap) {
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 37f96a66f..2913274db 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -162,7 +162,8 @@ class MacroAssembler: public Assembler {
// Conversions between tagged smi values and non-tagged integer values.
// Tag an integer value. The result must be known to be a valid smi value.
- // Only uses the low 32 bits of the src register.
+ // Only uses the low 32 bits of the src register. Sets the sign and zero
+ // flags based on the value of the resulting integer.
void Integer32ToSmi(Register dst, Register src);
// Tag an integer value if possible, or jump if the integer value cannot be
@@ -204,9 +205,15 @@ class MacroAssembler: public Assembler {
// Is the value a positive tagged smi.
Condition CheckPositiveSmi(Register src);
- // Are both values are tagged smis.
+ // Are both values tagged smis.
Condition CheckBothSmi(Register first, Register second);
+ // Are both values positive tagged smis.
+ Condition CheckBothPositiveSmi(Register first, Register second);
+
+ // Is either value a tagged smi.
+ Condition CheckEitherSmi(Register first, Register second);
+
// Is the value the minimum smi value (since we are using
// two's complement numbers, negating the value is known to yield
// a non-smi value).
@@ -245,6 +252,10 @@ class MacroAssembler: public Assembler {
// Jump if either or both registers are not smi values.
void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
+ // Jump if either or both registers are not positive smi values.
+ void JumpIfNotBothPositiveSmi(Register src1, Register src2,
+ Label* on_not_both_smi);
+
// Operations on tagged smi values.
// Smis represent a subset of integers. The subset is always equivalent to
@@ -403,6 +414,14 @@ class MacroAssembler: public Assembler {
void Test(const Operand& dst, Smi* source);
// ---------------------------------------------------------------------------
+ // String macros.
+ void JumpIfNotBothSequentialAsciiStrings(Register first_object,
+ Register second_object,
+ Register scratch1,
+ Register scratch2,
+ Label* on_not_both_flat_ascii);
+
+ // ---------------------------------------------------------------------------
// Macro instructions.
// Load a register with a long value as efficiently as possible.
@@ -441,6 +460,15 @@ class MacroAssembler: public Assembler {
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
+ // Check if the object in register heap_object is a string. Afterwards the
+ // register map contains the object map and the register instance_type the
+ // instance type. The registers map and instance_type can be the same, in
+ // which case they contain the instance type afterwards. Either of them can
+ // also be the same as heap_object.
+ Condition IsObjectStringType(Register heap_object,
+ Register map,
+ Register instance_type);
+
// FCmp is similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, and jz).
void FCmp();
@@ -617,6 +645,26 @@ class MacroAssembler: public Assembler {
// Jump to a runtime routine.
void JumpToRuntime(const ExternalReference& ext, int result_size);
+ // Before calling a C-function from generated code, align arguments on stack.
+ // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
+ // etc., not pushed. The argument count assumes all arguments are word sized.
+ // The number of slots reserved for arguments depends on platform. On Windows
+ // stack slots are reserved for the arguments passed in registers. On other
+ // platforms stack slots are only reserved for the arguments actually passed
+ // on the stack.
+ void PrepareCallCFunction(int num_arguments);
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+
+ // Calculate the number of stack slots to reserve for arguments when calling a
+ // C function.
+ int ArgumentStackSlotsForCFunctionCall(int num_arguments);
// ---------------------------------------------------------------------------
// Utilities
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 09cb9177a..026301b2a 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -71,8 +71,6 @@ namespace internal {
* through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
- * - at_start (if 1, we are starting at the start of the
- * string, otherwise 0)
* - int* capture_array (int[num_saved_registers_], for output).
* - end of input (Address of end of string)
* - start of input (Address of first character in string)
@@ -82,6 +80,8 @@ namespace internal {
* - backup of callee save registers (rbx, possibly rsi and rdi).
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
+ * - At start of string (if 1, we are starting at the start of the
+ * string, otherwise 0)
* - register 0 rbp[-n] (Only positions must be stored in the first
* - register 1 rbp[-n-8] num_saved_registers_ registers)
* - ...
@@ -329,14 +329,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
ASSERT(mode_ == UC16);
// Save important/volatile registers before calling C function.
#ifndef _WIN64
- // Callee save on Win64
+ // Caller save on Linux and callee save on Windows.
__ push(rsi);
__ push(rdi);
#endif
__ push(backtrack_stackpointer());
int num_arguments = 3;
- FrameAlign(num_arguments);
+ __ PrepareCallCFunction(num_arguments);
// Put arguments into parameter registers. Parameters are
// Address byte_offset1 - Address captured substring's start.
@@ -361,7 +361,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
#endif
ExternalReference compare =
ExternalReference::re_case_insensitive_compare_uc16();
- CallCFunction(compare, num_arguments);
+ __ CallCFunction(compare, num_arguments);
// Restore original values before reacting on result value.
__ Move(code_object_pointer(), masm_->CodeObject());
@@ -582,49 +582,38 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
return true;
}
case 'w': {
- Label done, check_digits;
- __ cmpl(current_character(), Immediate('9'));
- __ j(less_equal, &check_digits);
- __ cmpl(current_character(), Immediate('_'));
- __ j(equal, &done);
- // Convert to lower case if letter.
- __ movl(rax, current_character());
- __ orl(rax, Immediate(0x20));
- // check rax in range ['a'..'z'].
- __ subl(rax, Immediate('a'));
- __ cmpl(rax, Immediate('z' - 'a'));
- BranchOrBacktrack(above, on_no_match);
- __ jmp(&done);
- __ bind(&check_digits);
- // Check current character in range ['0'..'9'].
- __ cmpl(current_character(), Immediate('0'));
- BranchOrBacktrack(below, on_no_match);
- __ bind(&done);
-
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmpl(current_character(), Immediate('z'));
+ BranchOrBacktrack(above, on_no_match);
+ }
+ __ movq(rbx, ExternalReference::re_word_character_map());
+ ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
+ __ testb(Operand(rbx, current_character(), times_1, 0),
+ current_character());
+ BranchOrBacktrack(zero, on_no_match);
return true;
}
case 'W': {
- Label done, check_digits;
- __ cmpl(current_character(), Immediate('9'));
- __ j(less_equal, &check_digits);
- __ cmpl(current_character(), Immediate('_'));
- BranchOrBacktrack(equal, on_no_match);
- // Convert to lower case if letter.
- __ movl(rax, current_character());
- __ orl(rax, Immediate(0x20));
- // check current character in range ['a'..'z'], nondestructively.
- __ subl(rax, Immediate('a'));
- __ cmpl(rax, Immediate('z' - 'a'));
- BranchOrBacktrack(below_equal, on_no_match);
- __ jmp(&done);
- __ bind(&check_digits);
- // Check current character in range ['0'..'9'].
- __ cmpl(current_character(), Immediate('0'));
- BranchOrBacktrack(above_equal, on_no_match);
- __ bind(&done);
-
+ Label done;
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmpl(current_character(), Immediate('z'));
+ __ j(above, &done);
+ }
+ __ movq(rbx, ExternalReference::re_word_character_map());
+ ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
+ __ testb(Operand(rbx, current_character(), times_1, 0),
+ current_character());
+ BranchOrBacktrack(not_zero, on_no_match);
+ if (mode_ != ASCII) {
+ __ bind(&done);
+ }
return true;
}
+
case '*':
// Match any character.
return true;
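
The rewritten \w and \W checks trade a chain of range comparisons for one
table lookup. A scalar sketch, assuming a 128-entry byte table whose entries
are 0x00 for non-word characters and 0xff otherwise (the generated code ANDs
the table byte with the character itself, which is equivalent because entry
0 is zero):

    bool is_word_char(unsigned c, const unsigned char word_character_map[128]) {
      if (c > 'z') return false;                // beyond the table in UC16 mode
      return (word_character_map[c] & c) != 0;  // the testb above
    }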
@@ -645,7 +634,6 @@ void RegExpMacroAssemblerX64::Fail() {
Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Finalize code - write the entry point code now we know how many
// registers we need.
-
// Entry code:
__ bind(&entry_label_);
// Start new stack frame.
@@ -672,7 +660,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ASSERT_EQ(kInputStart, -3 * kPointerSize);
ASSERT_EQ(kInputEnd, -4 * kPointerSize);
ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
- ASSERT_EQ(kAtStart, -6 * kPointerSize);
+ ASSERT_EQ(kStackHighEnd, -6 * kPointerSize);
__ push(rdi);
__ push(rsi);
__ push(rdx);
@@ -682,7 +670,9 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ push(rbx); // Callee-save
#endif
+
__ push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ push(Immediate(0)); // Make room for "at start" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -727,6 +717,15 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Store this value in a local variable, for use when clearing
// position registers.
__ movq(Operand(rbp, kInputStartMinusOne), rax);
+
+ // Determine whether the start index is zero, that is, at the start of the
+ // string, and store that value in a local variable.
+ __ movq(rbx, Operand(rbp, kStartIndex));
+ __ xor_(rcx, rcx); // setcc only operates on cl (lower byte of rcx).
+ __ testq(rbx, rbx);
+ __ setcc(zero, rcx); // 1 if 0 (start of string), 0 if positive.
+ __ movq(Operand(rbp, kAtStart), rcx);
+
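
The at_start flag used to arrive as a ninth C argument; it is now computed in
the prologue and kept in a stack local, which is also why the
CALL_GENERATED_REGEXP_CODE macro later in this diff shrinks from eight
parameters to seven. The xor/test/setcc triple amounts to:

    // 1 when matching starts at the beginning of the string, else 0.
    int compute_at_start(long start_index) {
      return start_index == 0 ? 1 : 0;
    }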
if (num_saved_registers_ > 0) {
// Fill saved registers with initial value = start offset - 1
// Fill in stack push order, to avoid accessing across an unwritten
@@ -851,7 +850,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Call GrowStack(backtrack_stackpointer())
int num_arguments = 2;
- FrameAlign(num_arguments);
+ __ PrepareCallCFunction(num_arguments);
#ifdef _WIN64
// Microsoft passes parameters in rcx, rdx.
// First argument, backtrack stackpointer, is already in rcx.
@@ -862,7 +861,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
#endif
ExternalReference grow_stack = ExternalReference::re_grow_stack();
- CallCFunction(grow_stack, num_arguments);
+ __ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ testq(rax, rax);
@@ -1031,7 +1030,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
// This function call preserves no register values. Caller should
// store anything volatile in a C call or overwritten by this function.
int num_arguments = 3;
- FrameAlign(num_arguments);
+ __ PrepareCallCFunction(num_arguments);
#ifdef _WIN64
// Second argument: Code* of self. (Do this before overwriting r8).
__ movq(rdx, code_object_pointer());
@@ -1051,7 +1050,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
#endif
ExternalReference stack_check =
ExternalReference::re_check_stack_guard_state();
- CallCFunction(stack_check, num_arguments);
+ __ CallCFunction(stack_check, num_arguments);
}
@@ -1073,6 +1072,12 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
// If not real stack overflow the stack guard was used to interrupt
// execution for another purpose.
+ // If this is a direct call from JavaScript, retry the RegExp forcing the call
+ // through the runtime system. Currently the direct call cannot handle a GC.
+ if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+ return RETRY;
+ }
+
// Prepare for possible GC.
HandleScope handles;
Handle<Code> code_handle(re_code);
@@ -1267,45 +1272,6 @@ void RegExpMacroAssemblerX64::CheckStackLimit() {
}
-void RegExpMacroAssemblerX64::FrameAlign(int num_arguments) {
- // TODO(lrn): Since we no longer use the system stack arbitrarily (but we do
- // use it, e.g., for SafeCall), we know the number of elements on the stack
- // since the last frame alignment. We might be able to do this simpler then.
- int frameAlignment = OS::ActivationFrameAlignment();
- ASSERT(frameAlignment != 0);
- // Make stack end at alignment and make room for num_arguments pointers
- // (on Win64 only) and the original value of rsp.
- __ movq(kScratchRegister, rsp);
- ASSERT(IsPowerOf2(frameAlignment));
-#ifdef _WIN64
- // Allocate space for parameters and old rsp.
- __ subq(rsp, Immediate((num_arguments + 1) * kPointerSize));
- __ and_(rsp, Immediate(-frameAlignment));
- __ movq(Operand(rsp, num_arguments * kPointerSize), kScratchRegister);
-#else
- // Allocate space for old rsp.
- __ subq(rsp, Immediate(kPointerSize));
- __ and_(rsp, Immediate(-frameAlignment));
- __ movq(Operand(rsp, 0), kScratchRegister);
-#endif
-}
-
-
-void RegExpMacroAssemblerX64::CallCFunction(ExternalReference function,
- int num_arguments) {
- __ movq(rax, function);
- __ call(rax);
- ASSERT(OS::ActivationFrameAlignment() != 0);
-#ifdef _WIN64
- __ movq(rsp, Operand(rsp, num_arguments * kPointerSize));
-#else
- // All arguments passed in registers.
- ASSERT(num_arguments <= 6);
- __ pop(rsp);
-#endif
-}
-
-
void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
if (mode_ == ASCII) {
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/x64/regexp-macro-assembler-x64.h
index 694cba003..6d1396355 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.h
@@ -138,9 +138,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kInputStart = kStartIndex + kPointerSize;
static const int kInputEnd = kInputStart + kPointerSize;
static const int kRegisterOutput = kInputEnd + kPointerSize;
- // AtStart is passed as 32 bit int (values 0 or 1).
- static const int kAtStart = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kAtStart + kPointerSize;
+ static const int kStackHighEnd = kRegisterOutput + kPointerSize;
// DirectCall is passed as 32 bit int (values 0 or 1).
static const int kDirectCall = kStackHighEnd + kPointerSize;
#else
@@ -152,9 +150,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kInputStart = kStartIndex - kPointerSize;
static const int kInputEnd = kInputStart - kPointerSize;
static const int kRegisterOutput = kInputEnd - kPointerSize;
- static const int kAtStart = kRegisterOutput - kPointerSize;
- static const int kStackHighEnd = kFrameAlign;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kStackHighEnd = kRegisterOutput - kPointerSize;
+ static const int kDirectCall = kFrameAlign;
#endif
#ifdef _WIN64
@@ -168,7 +165,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// AMD64 Calling Convention has only one callee-save register that
// we use. We push this after the frame pointer (and after the
// parameters).
- static const int kBackup_rbx = kAtStart - kPointerSize;
+ static const int kBackup_rbx = kStackHighEnd - kPointerSize;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#endif
@@ -176,9 +173,10 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// the frame in GetCode.
static const int kInputStartMinusOne =
kLastCalleeSaveRegister - kPointerSize;
+ static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kAtStart - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
@@ -249,21 +247,6 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// Increments the stack pointer (rcx) by a word size.
inline void Drop();
- // Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, arguments must be stored in esp[0], esp[4],
- // etc., not pushed. The argument count assumes all arguments are word sized.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- inline void FrameAlign(int num_arguments);
-
- // Calls a C function and cleans up the space for arguments allocated
- // by FrameAlign. The called function is not allowed to trigger a garbage
- // collection, since that might move the code and invalidate the return
- // address (unless this is somehow accounted for by the called function).
- inline void CallCFunction(ExternalReference function, int num_arguments);
-
MacroAssembler* masm_;
ZoneList<int> code_relative_fixup_positions_;
diff --git a/deps/v8/src/x64/simulator-x64.h b/deps/v8/src/x64/simulator-x64.h
index 015ba1315..a0fc3cbf4 100644
--- a/deps/v8/src/x64/simulator-x64.h
+++ b/deps/v8/src/x64/simulator-x64.h
@@ -54,8 +54,8 @@ class SimulatorStack : public v8::internal::AllStatic {
// Call the generated regexp code directly. The entry function pointer should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- entry(p0, p1, p2, p3, p4, p5, p6, p7)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+ entry(p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index cbddb61ee..693447b5c 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -143,6 +143,7 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(holder);
__ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+ ASSERT(!Heap::InNewSpace(interceptor));
__ movq(kScratchRegister, Handle<Object>(interceptor),
RelocInfo::EMBEDDED_OBJECT);
__ push(kScratchRegister);
@@ -370,15 +371,47 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
}
-static void LookupPostInterceptor(JSObject* holder,
- String* name,
- LookupResult* lookup) {
- holder->LocalLookupRealNamedProperty(name, lookup);
- if (lookup->IsNotFound()) {
- Object* proto = holder->GetPrototype();
- if (proto != Heap::null_value()) {
- proto->Lookup(name, lookup);
- }
+template <class Compiler>
+static void CompileLoadInterceptor(Compiler* compiler,
+ StubCompiler* stub_compiler,
+ MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ ASSERT(holder->HasNamedInterceptor());
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ stub_compiler->CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, name, miss);
+
+ if (lookup->IsValid() && lookup->IsCacheable()) {
+ compiler->CompileCacheable(masm,
+ stub_compiler,
+ receiver,
+ reg,
+ scratch1,
+ scratch2,
+ holder,
+ lookup,
+ name,
+ miss);
+ } else {
+ compiler->CompileRegular(masm,
+ receiver,
+ reg,
+ scratch2,
+ holder,
+ miss);
}
}
@@ -518,51 +551,6 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
};
-template <class Compiler>
-static void CompileLoadInterceptor(Compiler* compiler,
- StubCompiler* stub_compiler,
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- Register reg =
- stub_compiler->CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
-
- if (lookup->IsValid() && lookup->IsCacheable()) {
- compiler->CompileCacheable(masm,
- stub_compiler,
- receiver,
- reg,
- scratch1,
- scratch2,
- holder,
- lookup,
- name,
- miss);
- } else {
- compiler->CompileRegular(masm,
- receiver,
- reg,
- scratch2,
- holder,
- miss);
- }
-}
-
-
class CallInterceptorCompiler BASE_EMBEDDED {
public:
explicit CallInterceptorCompiler(const ParameterCount& arguments)
@@ -631,7 +619,6 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
- ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
__ InvokeCode(code, expected, arguments_,
@@ -688,7 +675,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// rsp[16] argument argc - 1
// ...
// rsp[argc * 8] argument 1
- // rsp[(argc + 1) * 8] argument 0 = reciever
+ // rsp[(argc + 1) * 8] argument 0 = receiver
// rsp[(argc + 2) * 8] function name
Label miss;
@@ -721,47 +708,62 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
- // Check that the object is a two-byte string or a symbol.
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- rcx);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
- rbx, rdx, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ // Check that the object is a two-byte string or a symbol.
+ __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rcx);
+ __ j(above_equal, &miss);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ rcx);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+ rbx, rdx, name, &miss);
+ }
break;
case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(rdx, &fast);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::NUMBER_FUNCTION_INDEX,
- rcx);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
- rbx, rdx, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ JumpIfSmi(rdx, &fast);
+ __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
+ __ j(not_equal, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::NUMBER_FUNCTION_INDEX,
+ rcx);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+ rbx, rdx, name, &miss);
+ }
break;
}
case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
- __ j(equal, &fast);
- __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::BOOLEAN_FUNCTION_INDEX,
- rcx);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
- rbx, rdx, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a boolean.
+ __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
+ __ j(equal, &fast);
+ __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
+ __ j(not_equal, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::BOOLEAN_FUNCTION_INDEX,
+ rcx);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+ rbx, rdx, name, &miss);
+ }
break;
}
@@ -1339,25 +1341,22 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- // Get the object from the stack.
- __ movq(rbx, Operand(rsp, 1 * kPointerSize));
-
// Check that the object isn't a smi.
- __ JumpIfSmi(rbx, &miss);
+ __ JumpIfSmi(rdx, &miss);
// Check that the map of the object hasn't changed.
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
Handle<Map>(object->map()));
__ j(not_equal, &miss);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(rbx, rdx, &miss);
+ __ CheckAccessGlobalProxy(rdx, rbx, &miss);
}
// Stub never generated for non-global objects that require access
@@ -1365,7 +1364,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
__ pop(rbx); // remove the return address
- __ push(Operand(rsp, 0)); // receiver
+ __ push(rdx); // receiver
__ Push(Handle<AccessorInfo>(callback)); // callback info
__ push(rcx); // name
__ push(rax); // value
@@ -1378,7 +1377,6 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Handle store cache miss.
__ bind(&miss);
- __ Move(rcx, Handle<String>(name)); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1394,21 +1392,18 @@ Object* StoreStubCompiler::CompileStoreField(JSObject* object,
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- // Get the object from the stack.
- __ movq(rbx, Operand(rsp, 1 * kPointerSize));
-
// Generate store field code. Trashes the name register.
GenerateStoreField(masm(),
Builtins::StoreIC_ExtendStorage,
object,
index,
transition,
- rbx, rcx, rdx,
+ rdx, rcx, rbx,
&miss);
// Handle store cache miss.
@@ -1427,25 +1422,22 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- // Get the object from the stack.
- __ movq(rbx, Operand(rsp, 1 * kPointerSize));
-
// Check that the object isn't a smi.
- __ JumpIfSmi(rbx, &miss);
+ __ JumpIfSmi(rdx, &miss);
// Check that the map of the object hasn't changed.
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
Handle<Map>(receiver->map()));
__ j(not_equal, &miss);
// Perform global security token check if needed.
if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(rbx, rdx, &miss);
+ __ CheckAccessGlobalProxy(rdx, rbx, &miss);
}
// Stub never generated for non-global objects that require access
@@ -1453,7 +1445,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
__ pop(rbx); // remove the return address
- __ push(Operand(rsp, 0)); // receiver
+ __ push(rdx); // receiver
__ push(rcx); // name
__ push(rax); // value
__ push(rbx); // restore return address
@@ -1465,7 +1457,6 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Handle store cache miss.
__ bind(&miss);
- __ Move(rcx, Handle<String>(name)); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1480,14 +1471,13 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
// Check that the map of the global has not changed.
- __ movq(rbx, Operand(rsp, kPointerSize));
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
Handle<Map>(object->map()));
__ j(not_equal, &miss);
diff --git a/deps/v8/src/x64/virtual-frame-x64.cc b/deps/v8/src/x64/virtual-frame-x64.cc
index 6e84ed163..cb93d5d46 100644
--- a/deps/v8/src/x64/virtual-frame-x64.cc
+++ b/deps/v8/src/x64/virtual-frame-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -1046,31 +1046,45 @@ Result VirtualFrame::CallConstructor(int arg_count) {
Result VirtualFrame::CallStoreIC() {
// Name, value, and receiver are on top of the frame. The IC
- // expects name in rcx, value in rax, and receiver on the stack. It
- // does not drop the receiver.
+ // expects name in rcx, value in rax, and receiver in rdx.
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Result name = Pop();
Result value = Pop();
- PrepareForCall(1, 0); // One stack arg, not callee-dropped.
+ Result receiver = Pop();
+ PrepareForCall(0, 0);
- if (value.is_register() && value.reg().is(rcx)) {
- if (name.is_register() && name.reg().is(rax)) {
+ // Optimized for the case in which name is a constant value.
+ if (name.is_register() && (name.reg().is(rdx) || name.reg().is(rax))) {
+ if (!is_used(rcx)) {
+ name.ToRegister(rcx);
+ } else if (!is_used(rbx)) {
+ name.ToRegister(rbx);
+ } else {
+ ASSERT(!is_used(rdi)); // Only three results are live, so rdi is free.
+ name.ToRegister(rdi);
+ }
+ }
+ // Now name is not in rdx or rax, so we can fix them, then move name to rcx.
+ if (value.is_register() && value.reg().is(rdx)) {
+ if (receiver.is_register() && receiver.reg().is(rax)) {
// Wrong registers.
- __ xchg(rax, rcx);
+ __ xchg(rax, rdx);
} else {
- // Register rax is free for value, which frees rcx for name.
+ // Register rax is free for value, which frees rdx for receiver.
value.ToRegister(rax);
- name.ToRegister(rcx);
+ receiver.ToRegister(rdx);
}
} else {
- // Register rcx is free for name, which guarantees rax is free for
+ // Register rdx is free for receiver, which guarantees rax is free for
// value.
- name.ToRegister(rcx);
+ receiver.ToRegister(rdx);
value.ToRegister(rax);
}
-
+ // Receiver and value are in the right place, so rcx is free for name.
+ name.ToRegister(rcx);
name.Unuse();
value.Unuse();
+ receiver.Unuse();
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/x64/virtual-frame-x64.h b/deps/v8/src/x64/virtual-frame-x64.h
index 88cf2bca0..8e3e40f07 100644
--- a/deps/v8/src/x64/virtual-frame-x64.h
+++ b/deps/v8/src/x64/virtual-frame-x64.h
@@ -343,7 +343,7 @@ class VirtualFrame : public ZoneObject {
// of the frame. Key and receiver are not dropped.
Result CallKeyedStoreIC();
- // Call call IC. Arguments, reciever, and function name are found
+ // Call call IC. Arguments, receiver, and function name are found
// on top of the frame. Function name slot is not dropped. The
// argument count does not include the receiver.
Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 3db7c37f1..f71b3258a 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -61,6 +61,27 @@ using ::v8::Extension;
namespace i = ::v8::internal;
+static void ExpectString(const char* code, const char* expected) {
+ Local<Value> result = CompileRun(code);
+ CHECK(result->IsString());
+ String::AsciiValue ascii(result);
+ CHECK_EQ(expected, *ascii);
+}
+
+
+static void ExpectBoolean(const char* code, bool expected) {
+ Local<Value> result = CompileRun(code);
+ CHECK(result->IsBoolean());
+ CHECK_EQ(expected, result->BooleanValue());
+}
+
+
+static void ExpectObject(const char* code, Local<Value> expected) {
+ Local<Value> result = CompileRun(code);
+ CHECK(result->Equals(expected));
+}
+
+
static int signature_callback_count;
static v8::Handle<Value> IncrementingSignatureCallback(
const v8::Arguments& args) {
@@ -1130,7 +1151,7 @@ THREADED_TEST(UndefinedIsNotEnumerable) {
v8::Handle<Script> call_recursively_script;
-static const int kTargetRecursionDepth = 300; // near maximum
+static const int kTargetRecursionDepth = 200; // near maximum
static v8::Handle<Value> CallScriptRecursivelyCall(const v8::Arguments& args) {
@@ -2381,6 +2402,36 @@ THREADED_TEST(IndexedInterceptorWithIndexedAccessor) {
}
+static v8::Handle<Value> IdentityIndexedPropertyGetter(
+ uint32_t index,
+ const AccessorInfo& info) {
+ return v8::Integer::New(index);
+}
+
+
+THREADED_TEST(IndexedInterceptorWithNoSetter) {
+ v8::HandleScope scope;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+
+ LocalContext context;
+ context->Global()->Set(v8_str("obj"), templ->NewInstance());
+
+ const char* code =
+ "try {"
+ " obj[0] = 239;"
+ " for (var i = 0; i < 100; i++) {"
+ " var v = obj[0];"
+ " if (v != 0) throw 'Wrong value ' + v + ' at iteration ' + i;"
+ " }"
+ " 'PASSED'"
+ "} catch(e) {"
+ " e"
+ "}";
+ ExpectString(code, "PASSED");
+}
+
+
THREADED_TEST(MultiContexts) {
v8::HandleScope scope;
v8::Handle<ObjectTemplate> templ = ObjectTemplate::New();
@@ -2467,27 +2518,6 @@ THREADED_TEST(Regress892105) {
}
-static void ExpectString(const char* code, const char* expected) {
- Local<Value> result = CompileRun(code);
- CHECK(result->IsString());
- String::AsciiValue ascii(result);
- CHECK_EQ(0, strcmp(*ascii, expected));
-}
-
-
-static void ExpectBoolean(const char* code, bool expected) {
- Local<Value> result = CompileRun(code);
- CHECK(result->IsBoolean());
- CHECK_EQ(expected, result->BooleanValue());
-}
-
-
-static void ExpectObject(const char* code, Local<Value> expected) {
- Local<Value> result = CompileRun(code);
- CHECK(result->Equals(expected));
-}
-
-
THREADED_TEST(UndetectableObject) {
v8::HandleScope scope;
LocalContext env;
@@ -2788,6 +2818,10 @@ static const char* kExtensionTestScript =
static v8::Handle<Value> CallFun(const v8::Arguments& args) {
ApiTestFuzzer::Fuzz();
+ if (args.IsConstructCall()) {
+ args.This()->Set(v8_str("data"), args.Data());
+ return v8::Null();
+ }
return args.Data();
}
@@ -2829,6 +2863,25 @@ THREADED_TEST(FunctionLookup) {
}
+THREADED_TEST(NativeFunctionConstructCall) {
+ v8::RegisterExtension(new FunctionExtension());
+ v8::HandleScope handle_scope;
+ static const char* exts[1] = { "functiontest" };
+ v8::ExtensionConfiguration config(1, exts);
+ LocalContext context(&config);
+ for (int i = 0; i < 10; i++) {
+ // Run a few times to ensure that allocation of objects doesn't
+ // change behavior of a constructor function.
+ CHECK_EQ(v8::Integer::New(8),
+ Script::Compile(v8_str("(new A()).data"))->Run());
+ CHECK_EQ(v8::Integer::New(7),
+ Script::Compile(v8_str("(new B()).data"))->Run());
+ CHECK_EQ(v8::Integer::New(6),
+ Script::Compile(v8_str("(new C()).data"))->Run());
+ }
+}
+
+
static const char* last_location;
static const char* last_message;
void StoringErrorCallback(const char* location, const char* message) {
@@ -3549,6 +3602,37 @@ TEST(ApiUncaughtException) {
v8::V8::RemoveMessageListeners(ApiUncaughtExceptionTestListener);
}
+static const char* script_resource_name = "ExceptionInNativeScript.js";
+static void ExceptionInNativeScriptTestListener(v8::Handle<v8::Message> message,
+ v8::Handle<Value>) {
+ v8::Handle<v8::Value> name_val = message->GetScriptResourceName();
+ CHECK(!name_val.IsEmpty() && name_val->IsString());
+ v8::String::AsciiValue name(message->GetScriptResourceName());
+ CHECK_EQ(script_resource_name, *name);
+ CHECK_EQ(3, message->GetLineNumber());
+ v8::String::AsciiValue source_line(message->GetSourceLine());
+ CHECK_EQ(" new o.foo();", *source_line);
+}
+
+TEST(ExceptionInNativeScript) {
+ v8::HandleScope scope;
+ LocalContext env;
+ v8::V8::AddMessageListener(ExceptionInNativeScriptTestListener);
+
+ Local<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(TroubleCallback);
+ v8::Local<v8::Object> global = env->Global();
+ global->Set(v8_str("trouble"), fun->GetFunction());
+
+ Script::Compile(v8_str("function trouble() {\n"
+ " var o = {};\n"
+ " new o.foo();\n"
+ "};"), v8::String::New(script_resource_name))->Run();
+ Local<Value> trouble = global->Get(v8_str("trouble"));
+ CHECK(trouble->IsFunction());
+ Function::Cast(*trouble)->Call(global, 0, NULL);
+ v8::V8::RemoveMessageListeners(ExceptionInNativeScriptTestListener);
+}
+
TEST(CompilationErrorUsingTryCatchHandler) {
v8::HandleScope scope;
@@ -4040,6 +4124,65 @@ THREADED_TEST(AccessControl) {
}
+static bool GetOwnPropertyNamesNamedBlocker(Local<v8::Object> global,
+ Local<Value> name,
+ v8::AccessType type,
+ Local<Value> data) {
+ return false;
+}
+
+
+static bool GetOwnPropertyNamesIndexedBlocker(Local<v8::Object> global,
+ uint32_t key,
+ v8::AccessType type,
+ Local<Value> data) {
+ return false;
+}
+
+
+THREADED_TEST(AccessControlGetOwnPropertyNames) {
+ v8::HandleScope handle_scope;
+ v8::Handle<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New();
+
+ obj_template->Set(v8_str("x"), v8::Integer::New(42));
+ obj_template->SetAccessCheckCallbacks(GetOwnPropertyNamesNamedBlocker,
+ GetOwnPropertyNamesIndexedBlocker);
+
+ // Create an environment
+ v8::Persistent<Context> context0 = Context::New(NULL, obj_template);
+ context0->Enter();
+
+ v8::Handle<v8::Object> global0 = context0->Global();
+
+ v8::HandleScope scope1;
+
+ v8::Persistent<Context> context1 = Context::New();
+ context1->Enter();
+
+ v8::Handle<v8::Object> global1 = context1->Global();
+ global1->Set(v8_str("other"), global0);
+ global1->Set(v8_str("object"), obj_template->NewInstance());
+
+ v8::Handle<Value> value;
+
+ // Attempt to get the property names of the other global object and
+ // of an object that requires access checks. Accessing the other
+ // global object should be blocked by access checks on the global
+ // proxy object. Accessing the object that requires access checks
+ // is blocked by the access checks on the object itself.
+ value = CompileRun("Object.getOwnPropertyNames(other).length == 0");
+ CHECK(value->IsTrue());
+
+ value = CompileRun("Object.getOwnPropertyNames(object).length == 0");
+ CHECK(value->IsTrue());
+
+ context1->Exit();
+ context0->Exit();
+ context1.Dispose();
+ context0.Dispose();
+}
+
+
static v8::Handle<Value> ConstTenGetter(Local<String> name,
const AccessorInfo& info) {
return v8_num(10);
@@ -4895,8 +5038,7 @@ THREADED_TEST(CallAsFunction) {
CHECK_EQ(17, value->Int32Value());
// Check that the call-as-function handler can be called through
- // new. Currently, there is no way to check in the call-as-function
- // handler if it has been called through new or not.
+ // new.
value = CompileRun("new obj(43)");
CHECK(!try_catch.HasCaught());
CHECK_EQ(-43, value->Int32Value());
@@ -5623,6 +5765,35 @@ THREADED_TEST(InterceptorCallICInvalidatedConstantFunctionViaGlobal) {
}
+// Test the case when the actual function to call sits on the global object.
+THREADED_TEST(InterceptorCallICCachedFromGlobal) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
+ templ_o->SetNamedPropertyHandler(NoBlockGetterX);
+
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+
+ v8::Handle<Value> value = CompileRun(
+ "try {"
+ " o.__proto__ = this;"
+ " for (var i = 0; i < 10; i++) {"
+ " var v = o.parseFloat('239');"
+ " if (v != 239) throw v;"
+ // Now it should be ICed and keep a reference to parseFloat.
+ " }"
+ " var result = 0;"
+ " for (var i = 0; i < 10; i++) {"
+ " result += o.parseFloat('239');"
+ " }"
+ " result"
+ "} catch(e) {"
+ " e"
+ "};");
+ CHECK_EQ(239 * 10, value->Int32Value());
+}
+
+
static int interceptor_call_count = 0;
static v8::Handle<Value> InterceptorICRefErrorGetter(Local<String> name,
@@ -6184,8 +6355,16 @@ THREADED_TEST(LockUnlockLock) {
}
-static int GetSurvivingGlobalObjectsCount() {
+static int GetGlobalObjectsCount() {
int count = 0;
+ v8::internal::HeapIterator it;
+ for (i::HeapObject* object = it.next(); object != NULL; object = it.next())
+ if (object->IsJSGlobalObject()) count++;
+ return count;
+}
+
+
+static int GetSurvivingGlobalObjectsCount() {
// We need to collect all garbage twice to be sure that everything
// has been collected. This is because inline caches are cleared in
// the first garbage collection but some of the maps have already
@@ -6193,13 +6372,7 @@ static int GetSurvivingGlobalObjectsCount() {
// collected until the second garbage collection.
v8::internal::Heap::CollectAllGarbage(false);
v8::internal::Heap::CollectAllGarbage(false);
- v8::internal::HeapIterator it;
- while (it.has_next()) {
- v8::internal::HeapObject* object = it.next();
- if (object->IsJSGlobalObject()) {
- count++;
- }
- }
+ int count = GetGlobalObjectsCount();
#ifdef DEBUG
if (count > 0) v8::internal::Heap::TracePathToGlobal();
#endif
@@ -8569,17 +8742,6 @@ THREADED_TEST(SpaghettiStackReThrow) {
}
-static int GetGlobalObjectsCount() {
- int count = 0;
- v8::internal::HeapIterator it;
- while (it.has_next()) {
- v8::internal::HeapObject* object = it.next();
- if (object->IsJSGlobalObject()) count++;
- }
- return count;
-}
-
-
TEST(Regress528) {
v8::V8::Initialize();
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index f6e4d046b..459b8624c 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -224,4 +224,63 @@ TEST(3) {
}
+TEST(4) {
+ // Test the VFP floating point instructions.
+ InitializeVM();
+ v8::HandleScope scope;
+
+ typedef struct {
+ double a;
+ double b;
+ double c;
+ } T;
+ T t;
+
+ // Create a function that accepts &t, and loads, manipulates, and stores
+ // the doubles t.a, t.b, and t.c.
+ Assembler assm(NULL, 0);
+ Label L, C;
+
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+
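+ // Standard prologue: save r4, fp and lr, then set up the frame pointer.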
+ __ mov(ip, Operand(sp));
+ __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ sub(fp, ip, Operand(4));
+
+ __ mov(r4, Operand(r0));
+ __ vldr(d6, r4, OFFSET_OF(T, a));
+ __ vldr(d7, r4, OFFSET_OF(T, b));
+ __ vadd(d5, d6, d7);
+ __ vstr(d5, r4, OFFSET_OF(T, c));
+
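+ // Round-trip the sum through a core register pair (d5 -> r2:r3 -> d4)
+ // to exercise vmov in both directions, then store the copy to t.b.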
+ __ vmov(r2, r3, d5);
+ __ vmov(d4, r2, r3);
+ __ vstr(d4, r4, OFFSET_OF(T, b));
+
+ __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = Heap::CreateCode(desc,
+ NULL,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Object>(Heap::undefined_value()));
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ t.a = 1.5;
+ t.b = 2.75;
+ t.c = 17.17;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(4.25, t.c);
+ CHECK_EQ(4.25, t.b);
+ CHECK_EQ(1.5, t.a);
+ }
+}
+
#undef __
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 8c9e4ee57..05c29d710 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
-#include <wchar.h> // wint_t
+#include <wchar.h> // wint_t
#include "v8.h"
@@ -74,7 +74,7 @@ v8::Handle<v8::Value> PrintExtension::Print(const v8::Arguments& args) {
uint16_t* string = NewArray<uint16_t>(length + 1);
string_obj->Write(string);
for (int j = 0; j < length; j++)
- printf("%lc", (wint_t)string[j]);
+ printf("%lc", static_cast<wint_t>(string[j]));
DeleteArray(string);
}
printf("\n");
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index cad1ba3ae..92e18e068 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -413,9 +413,7 @@ void CheckDebuggerUnloaded(bool check_functions) {
// Iterate the heap and check that there are no debugger-related objects left.
HeapIterator iterator;
- while (iterator.has_next()) {
- HeapObject* obj = iterator.next();
- CHECK(obj != NULL);
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
CHECK(!obj->IsDebugInfo());
CHECK(!obj->IsBreakPointInfo());
@@ -443,6 +441,9 @@ void CheckDebuggerUnloaded(bool check_functions) {
// Check that the debugger has been fully unloaded.
static void CheckDebuggerUnloaded(bool check_functions = false) {
+ // Let the debugger unload itself synchronously.
+ v8::Debug::ProcessDebugMessages();
+
v8::internal::CheckDebuggerUnloaded(check_functions);
}
@@ -2046,6 +2047,33 @@ TEST(DebuggerStatement) {
}
+// Test setting a breakpoint on the debugger statement.
+TEST(DebuggerStatementBreakpoint) {
+ break_point_hit_count = 0;
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount,
+ v8::Undefined());
+ v8::Script::Compile(v8::String::New("function foo(){debugger;}"))->Run();
+ v8::Local<v8::Function> foo =
+ v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
+
+ // The debugger statement triggers a breakpoint hit.
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(1, break_point_hit_count);
+
+ int bp = SetBreakPoint(foo, 0);
+
+ // A breakpoint at the same position does not cause a duplicate hit.
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(2, break_point_hit_count);
+
+ ClearBreakPoint(bp);
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
// Test that the evaluation of expressions when a break point is hit generates
// the correct results.
TEST(DebugEvaluate) {
@@ -2160,6 +2188,168 @@ TEST(DebugEvaluate) {
CheckDebuggerUnloaded();
}
+// Copies a C string to a 16-bit string. Does not check for buffer overflow.
+// Does not use the V8 engine to convert strings, so it can be used
+// in any thread. Returns the length of the string.
+int AsciiToUtf16(const char* input_buffer, uint16_t* output_buffer) {
+ int i;
+ for (i = 0; input_buffer[i] != '\0'; ++i) {
+ // ASCII does not use chars > 127, but be careful anyway.
+ output_buffer[i] = static_cast<unsigned char>(input_buffer[i]);
+ }
+ output_buffer[i] = 0;
+ return i;
+}
+
+// Copies a 16-bit string to a C string by dropping the high byte of
+// each character. Does not check for buffer overflow.
+// Can be used in any thread. Requires string length as an input.
+int Utf16ToAscii(const uint16_t* input_buffer, int length,
+ char* output_buffer, int output_len = -1) {
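+ // When an output buffer length is supplied, truncate the copy so that
+ // the terminating '\0' still fits.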
+ if (output_len >= 0) {
+ if (length > output_len - 1) {
+ length = output_len - 1;
+ }
+ }
+
+ for (int i = 0; i < length; ++i) {
+ output_buffer[i] = static_cast<char>(input_buffer[i]);
+ }
+ output_buffer[length] = '\0';
+ return length;
+}
+
+
+// Match parts of the message to extract the evaluate result text value.
+bool GetEvaluateStringResult(char *message, char* buffer, int buffer_size) {
+ if (strstr(message, "\"command\":\"evaluate\"") == NULL) {
+ return false;
+ }
+ const char* prefix = "\"text\":\"";
+ char* pos1 = strstr(message, prefix);
+ if (pos1 == NULL) {
+ return false;
+ }
+ pos1 += strlen(prefix);
+ char* pos2 = strchr(pos1, '"');
+ if (pos2 == NULL) {
+ return false;
+ }
+ Vector<char> buf(buffer, buffer_size);
+ int len = static_cast<int>(pos2 - pos1);
+ if (len > buffer_size - 1) {
+ len = buffer_size - 1;
+ }
+ OS::StrNCpy(buf, pos1, len);
+ buffer[buffer_size - 1] = '\0';
+ return true;
+}
+
+
+struct EvaluateResult {
+ static const int kBufferSize = 20;
+ char buffer[kBufferSize];
+};
+
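+// Collects evaluate results in a small ring buffer; counter records how
+// many evaluate responses have been seen so far.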
+struct DebugProcessDebugMessagesData {
+ static const int kArraySize = 5;
+ int counter;
+ EvaluateResult results[kArraySize];
+
+ void reset() {
+ counter = 0;
+ }
+ EvaluateResult* current() {
+ return &results[counter % kArraySize];
+ }
+ void next() {
+ counter++;
+ }
+};
+
+DebugProcessDebugMessagesData process_debug_messages_data;
+
+static void DebugProcessDebugMessagesHandler(
+ const uint16_t* message,
+ int length,
+ v8::Debug::ClientData* client_data) {
+
+ const int kBufferSize = 100000;
+ char print_buffer[kBufferSize];
+ Utf16ToAscii(message, length, print_buffer, kBufferSize);
+
+ EvaluateResult* array_item = process_debug_messages_data.current();
+
+ bool res = GetEvaluateStringResult(print_buffer,
+ array_item->buffer,
+ EvaluateResult::kBufferSize);
+ if (res) {
+ process_debug_messages_data.next();
+ }
+}
+
+// Test that the evaluation of expressions works even from ProcessDebugMessages
+// i.e. with empty stack.
+TEST(DebugEvaluateWithoutStack) {
+ v8::Debug::SetMessageHandler(DebugProcessDebugMessagesHandler);
+
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ const char* source =
+ "var v1 = 'Pinguin';\n function getAnimal() { return 'Capy' + 'bara'; }";
+
+ v8::Script::Compile(v8::String::New(source))->Run();
+
+ v8::Debug::ProcessDebugMessages();
+
+ const int kBufferSize = 1000;
+ uint16_t buffer[kBufferSize];
+
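+ // With no break event there is no stack frame to evaluate in, so every
+ // request asks for a global evaluation with breaks disabled.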
+ const char* command_111 = "{\"seq\":111,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{"
+ " \"global\":true,"
+ " \"expression\":\"v1\",\"disable_break\":true"
+ "}}";
+
+ v8::Debug::SendCommand(buffer, AsciiToUtf16(command_111, buffer));
+
+ const char* command_112 = "{\"seq\":112,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{"
+ " \"global\":true,"
+ " \"expression\":\"getAnimal()\",\"disable_break\":true"
+ "}}";
+
+ v8::Debug::SendCommand(buffer, AsciiToUtf16(command_112, buffer));
+
+ const char* command_113 = "{\"seq\":113,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{"
+ " \"global\":true,"
+ " \"expression\":\"239 + 566\",\"disable_break\":true"
+ "}}";
+
+ v8::Debug::SendCommand(buffer, AsciiToUtf16(command_113, buffer));
+
+ v8::Debug::ProcessDebugMessages();
+
+ CHECK_EQ(3, process_debug_messages_data.counter);
+
+ CHECK_EQ(strcmp("Pinguin", process_debug_messages_data.results[0].buffer), 0);
+ CHECK_EQ(strcmp("Capybara", process_debug_messages_data.results[1].buffer),
+ 0);
+ CHECK_EQ(strcmp("805", process_debug_messages_data.results[2].buffer), 0);
+
+ v8::Debug::SetMessageHandler(NULL);
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
// Simple test of the stepping mechanism using only store ICs.
TEST(DebugStepLinear) {
@@ -3590,31 +3780,6 @@ TEST(NoHiddenProperties) {
// Support classes
-// Copies a C string to a 16-bit string. Does not check for buffer overflow.
-// Does not use the V8 engine to convert strings, so it can be used
-// in any thread. Returns the length of the string.
-int AsciiToUtf16(const char* input_buffer, uint16_t* output_buffer) {
- int i;
- for (i = 0; input_buffer[i] != '\0'; ++i) {
- // ASCII does not use chars > 127, but be careful anyway.
- output_buffer[i] = static_cast<unsigned char>(input_buffer[i]);
- }
- output_buffer[i] = 0;
- return i;
-}
-
-// Copies a 16-bit string to a C string by dropping the high byte of
-// each character. Does not check for buffer overflow.
-// Can be used in any thread. Requires string length as an input.
-int Utf16ToAscii(const uint16_t* input_buffer, int length,
- char* output_buffer) {
- for (int i = 0; i < length; ++i) {
- output_buffer[i] = static_cast<char>(input_buffer[i]);
- }
- output_buffer[length] = '\0';
- return length;
-}
-
// Provides synchronization between k threads, where k is an input to the
// constructor. The Wait() call blocks a thread until it is called for the
// k'th time, then all calls return. Each ThreadBarrier object can only
@@ -3745,6 +3910,23 @@ int GetBreakpointIdFromBreakEventMessage(char *message) {
}
+// Match parts of the message to extract the totalFrames count.
+int GetTotalFramesInt(char *message) {
+ const char* prefix = "\"totalFrames\":";
+ char* pos = strstr(message, prefix);
+ if (pos == NULL) {
+ return -1;
+ }
+ pos += strlen(prefix);
+ char* pos_end = pos;
+ int res = static_cast<int>(strtol(pos, &pos_end, 10));
+ if (pos_end == pos) {
+ return -1;
+ }
+ return res;
+}
+
+
/* Test MessageQueues */
/* Tests the message queues that hold debugger commands and
* response messages to the debugger. Fills queues and makes
@@ -4108,7 +4290,12 @@ class BreakpointsV8Thread : public v8::internal::Thread {
class BreakpointsDebuggerThread : public v8::internal::Thread {
public:
+ explicit BreakpointsDebuggerThread(bool global_evaluate)
+ : global_evaluate_(global_evaluate) {}
void Run();
+
+ private:
+ bool global_evaluate_;
};
@@ -4176,24 +4363,51 @@ void BreakpointsDebuggerThread::Run() {
"\"type\":\"request\","
"\"command\":\"setbreakpoint\","
"\"arguments\":{\"type\":\"function\",\"target\":\"dog\",\"line\":3}}";
- const char* command_3 = "{\"seq\":103,"
- "\"type\":\"request\","
- "\"command\":\"evaluate\","
- "\"arguments\":{\"expression\":\"dog()\",\"disable_break\":false}}";
- const char* command_4 = "{\"seq\":104,"
- "\"type\":\"request\","
- "\"command\":\"evaluate\","
- "\"arguments\":{\"expression\":\"x + 1\",\"disable_break\":true}}";
+ const char* command_3;
+ if (this->global_evaluate_) {
+ command_3 = "{\"seq\":103,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{\"expression\":\"dog()\",\"disable_break\":false,"
+ "\"global\":true}}";
+ } else {
+ command_3 = "{\"seq\":103,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{\"expression\":\"dog()\",\"disable_break\":false}}";
+ }
+ const char* command_4;
+ if (this->global_evaluate_) {
+ command_4 = "{\"seq\":104,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{\"expression\":\"100 + 8\",\"disable_break\":true,"
+ "\"global\":true}}";
+ } else {
+ command_4 = "{\"seq\":104,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{\"expression\":\"x + 1\",\"disable_break\":true}}";
+ }
const char* command_5 = "{\"seq\":105,"
"\"type\":\"request\","
"\"command\":\"continue\"}";
const char* command_6 = "{\"seq\":106,"
"\"type\":\"request\","
"\"command\":\"continue\"}";
- const char* command_7 = "{\"seq\":107,"
- "\"type\":\"request\","
- "\"command\":\"evaluate\","
- "\"arguments\":{\"expression\":\"dog()\",\"disable_break\":true}}";
+ const char* command_7;
+ if (this->global_evaluate_) {
+ command_7 = "{\"seq\":107,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{\"expression\":\"dog()\",\"disable_break\":true,"
+ "\"global\":true}}";
+ } else {
+ command_7 = "{\"seq\":107,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{\"expression\":\"dog()\",\"disable_break\":true}}";
+ }
const char* command_8 = "{\"seq\":108,"
"\"type\":\"request\","
"\"command\":\"continue\"}";
@@ -4250,12 +4464,12 @@ void BreakpointsDebuggerThread::Run() {
v8::Debug::SendCommand(buffer, AsciiToUtf16(command_8, buffer));
}
-BreakpointsDebuggerThread breakpoints_debugger_thread;
-BreakpointsV8Thread breakpoints_v8_thread;
-
-TEST(RecursiveBreakpoints) {
+void TestRecursiveBreakpointsGeneric(bool global_evaluate) {
i::FLAG_debugger_auto_break = true;
+ BreakpointsDebuggerThread breakpoints_debugger_thread(global_evaluate);
+ BreakpointsV8Thread breakpoints_v8_thread;
+
// Create a V8 environment
Barriers stack_allocated_breakpoints_barriers;
stack_allocated_breakpoints_barriers.Initialize();
@@ -4268,6 +4482,14 @@ TEST(RecursiveBreakpoints) {
breakpoints_debugger_thread.Join();
}
+TEST(RecursiveBreakpoints) {
+ TestRecursiveBreakpointsGeneric(false);
+}
+
+TEST(RecursiveBreakpointsGlobal) {
+ TestRecursiveBreakpointsGeneric(true);
+}
+
static void DummyDebugEventListener(v8::DebugEvent event,
v8::Handle<v8::Object> exec_state,
@@ -5655,6 +5877,103 @@ TEST(NoDebugBreakInAfterCompileMessageHandler) {
}
+static int counting_message_handler_counter;
+
+static void CountingMessageHandler(const v8::Debug::Message& message) {
+ counting_message_handler_counter++;
+}
+
+// Test that debug messages get processed when ProcessDebugMessages is called.
+TEST(ProcessDebugMessages) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ counting_message_handler_counter = 0;
+
+ v8::Debug::SetMessageHandler2(CountingMessageHandler);
+
+ const int kBufferSize = 1000;
+ uint16_t buffer[kBufferSize];
+ const char* scripts_command =
+ "{\"seq\":0,"
+ "\"type\":\"request\","
+ "\"command\":\"scripts\"}";
+
+ // Send scripts command.
+ v8::Debug::SendCommand(buffer, AsciiToUtf16(scripts_command, buffer));
+
+ CHECK_EQ(0, counting_message_handler_counter);
+ v8::Debug::ProcessDebugMessages();
+ // At least one message should arrive.
+ CHECK_GE(counting_message_handler_counter, 1);
+
+ counting_message_handler_counter = 0;
+
+ v8::Debug::SendCommand(buffer, AsciiToUtf16(scripts_command, buffer));
+ v8::Debug::SendCommand(buffer, AsciiToUtf16(scripts_command, buffer));
+ CHECK_EQ(0, counting_message_handler_counter);
+ v8::Debug::ProcessDebugMessages();
+ // At least two messages should arrive.
+ CHECK_GE(counting_message_handler_counter, 2);
+
+ // Get rid of the debug message handler.
+ v8::Debug::SetMessageHandler2(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
+struct BacktraceData {
+ static int frame_counter;
+ static void MessageHandler(const v8::Debug::Message& message) {
+ char print_buffer[1000];
+ v8::String::Value json(message.GetJSON());
+ Utf16ToAscii(*json, json.length(), print_buffer, 1000);
+
+ if (strstr(print_buffer, "backtrace") == NULL) {
+ return;
+ }
+ frame_counter = GetTotalFramesInt(print_buffer);
+ }
+};
+
+int BacktraceData::frame_counter;
+
+
+// Test that the backtrace command reports the expected frame counts.
+TEST(Backtrace) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ v8::Debug::SetMessageHandler2(BacktraceData::MessageHandler);
+
+ const int kBufferSize = 1000;
+ uint16_t buffer[kBufferSize];
+ const char* backtrace_command =
+ "{\"seq\":0,"
+ "\"type\":\"request\","
+ "\"command\":\"backtrace\"}";
+
+ // Check backtrace from ProcessDebugMessages.
+ BacktraceData::frame_counter = -10;
+ v8::Debug::SendCommand(buffer, AsciiToUtf16(backtrace_command, buffer));
+ v8::Debug::ProcessDebugMessages();
+ CHECK_EQ(BacktraceData::frame_counter, 0);
+
+ v8::Handle<v8::String> void0 = v8::String::New("void(0)");
+ v8::Handle<v8::Script> script = v8::Script::Compile(void0, void0);
+
+ // Check backtrace from "void(0)" script.
+ BacktraceData::frame_counter = -10;
+ v8::Debug::SendCommand(buffer, AsciiToUtf16(backtrace_command, buffer));
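+ // The backtrace request is processed while the script runs, so exactly
+ // one frame should be reported.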
+ script->Run();
+ CHECK_EQ(BacktraceData::frame_counter, 1);
+
+ // Get rid of the debug message handler.
+ v8::Debug::SetMessageHandler2(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
TEST(GetMirror) {
v8::HandleScope scope;
DebugLocalContext env;
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index ba4eec26a..7b0ad99e8 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -101,6 +101,8 @@ TEST(DisasmIa320) {
__ cmp(Operand(ebp, ecx, times_4, 0), Immediate(1000));
Handle<FixedArray> foo2 = Factory::NewFixedArray(10, TENURED);
__ cmp(ebx, foo2);
+ __ cmpb(ebx, Operand(ebp, ecx, times_2, 0));
+ __ cmpb(Operand(ebp, ecx, times_2, 0), ebx);
__ or_(edx, 3);
__ xor_(edx, 3);
__ nop();
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 04e0037b0..295b0ee06 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -64,10 +64,8 @@ TEST(ConstructorProfile) {
ConstructorHeapProfileTestHelper cons_profile;
i::AssertNoAllocation no_alloc;
i::HeapIterator iterator;
- while (iterator.has_next()) {
- i::HeapObject* obj = iterator.next();
+ for (i::HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next())
cons_profile.CollectStats(obj);
- }
CHECK_EQ(0, cons_profile.f_count());
cons_profile.PrintStats();
CHECK_EQ(2, cons_profile.f_count());
@@ -375,10 +373,8 @@ TEST(RetainerProfile) {
RetainerHeapProfile ret_profile;
i::AssertNoAllocation no_alloc;
i::HeapIterator iterator;
- while (iterator.has_next()) {
- i::HeapObject* obj = iterator.next();
+ for (i::HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next())
ret_profile.CollectStats(obj);
- }
RetainerProfilePrinter printer;
ret_profile.DebugPrintStats(&printer);
const char* retainers_of_a = printer.GetRetainers("A");
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index 17bee5b06..d36286bb6 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -746,16 +746,13 @@ static int ObjectsFoundInHeap(Handle<Object> objs[], int size) {
// Count the number of objects found in the heap.
int found_count = 0;
HeapIterator iterator;
- while (iterator.has_next()) {
- HeapObject* obj = iterator.next();
- CHECK(obj != NULL);
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
for (int i = 0; i < size; i++) {
if (*objs[i] == obj) {
found_count++;
}
}
}
- CHECK(!iterator.has_next());
return found_count;
}
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index 68cbc2619..8ea9f7901 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -47,10 +47,10 @@ static void InitTraceEnv(TickSample* sample) {
static void DoTrace(Address fp) {
- trace_env.sample->fp = reinterpret_cast<uintptr_t>(fp);
+ trace_env.sample->fp = fp;
// sp is only used to define stack high bound
trace_env.sample->sp =
- reinterpret_cast<uintptr_t>(trace_env.sample) - 10240;
+ reinterpret_cast<Address>(trace_env.sample) - 10240;
StackTracer::Trace(trace_env.sample);
}
@@ -315,6 +315,9 @@ TEST(PureJSStackTrace) {
" JSTrace();"
"};\n"
"OuterJSTrace();");
+ // The last JS function called.
+ CHECK_EQ(GetGlobalJSFunction("JSFuncDoTrace")->address(),
+ sample.function);
CHECK_GT(sample.frames_count, 1);
// Stack sampling will start from the caller of JSFuncDoTrace, i.e. "JSTrace"
CheckRetAddrIsInJSFunction("JSTrace",
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 85ff331a6..eca2c2b67 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -202,9 +202,9 @@ static int CheckThatProfilerWorks(int log_pos) {
// Force compiler to generate new code by parametrizing source.
EmbeddedVector<char, 100> script_src;
i::OS::SNPrintF(script_src,
- "for (var i = 0; i < 1000; ++i) { "
- "(function(x) { return %d * x; })(i); }",
- log_pos);
+ "function f%d(x) { return %d * x; }"
+ "for (var i = 0; i < 10000; ++i) { f%d(i); }",
+ log_pos, log_pos, log_pos);
// Run code for 200 msecs to get some ticks.
const double end_time = i::OS::TimeCurrentMillis() + 200;
while (i::OS::TimeCurrentMillis() < end_time) {
@@ -228,6 +228,7 @@ static int CheckThatProfilerWorks(int log_pos) {
log_pos += log_size;
// Check buffer contents.
buffer[log_size] = '\0';
+ printf("%s", buffer.start());
const char* tick = "\ntick,";
CHECK_NE(NULL, strstr(buffer.start(), code_creation));
const bool ticks_found = strstr(buffer.start(), tick) != NULL;
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index e56f0f47e..5c7b57cf8 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -207,6 +207,36 @@ TEST(MarkCompactCollector) {
}
+static Handle<Map> CreateMap() {
+ return Factory::NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+}
+
+
+TEST(MapCompact) {
+ FLAG_max_map_space_pages = 16;
+ InitializeVM();
+
+ {
+ v8::HandleScope sc;
+ // Keep allocating maps while pointers are still encodable and thus
+ // mark-compact is permitted.
+ Handle<JSObject> root = Factory::NewJSObjectFromMap(CreateMap());
+ do {
+ Handle<Map> map = CreateMap();
+ map->set_prototype(*root);
+ root = Factory::NewJSObjectFromMap(map);
+ } while (Heap::map_space()->MapPointersEncodable());
+ }
+ // Now, as we don't have any handles to the just-allocated maps, we
+ // should be able to trigger map compaction.
+ // To give an additional chance to fail, try to force a compaction that
+ // should be impossible right now.
+ Heap::CollectAllGarbage(true);
+ // And now map pointers should be encodable again.
+ CHECK(Heap::map_space()->MapPointersEncodable());
+}
+
+
static int gc_starts = 0;
static int gc_ends = 0;
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index c72c4d19f..b1ca45aaa 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -679,16 +679,14 @@ static ArchRegExpMacroAssembler::Result Execute(Code* code,
int start_offset,
const byte* input_start,
const byte* input_end,
- int* captures,
- bool at_start) {
+ int* captures) {
return NativeRegExpMacroAssembler::Execute(
code,
input,
start_offset,
input_start,
input_end,
- captures,
- at_start);
+ captures);
}
@@ -716,8 +714,7 @@ TEST(MacroAssemblerNativeSuccess) {
0,
start_adr,
start_adr + seq_input->length(),
- captures,
- true);
+ captures);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(-1, captures[0]);
@@ -760,8 +757,7 @@ TEST(MacroAssemblerNativeSimple) {
0,
start_adr,
start_adr + input->length(),
- captures,
- true);
+ captures);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, captures[0]);
@@ -778,8 +774,7 @@ TEST(MacroAssemblerNativeSimple) {
0,
start_adr,
start_adr + input->length(),
- captures,
- true);
+ captures);
CHECK_EQ(NativeRegExpMacroAssembler::FAILURE, result);
}
@@ -820,8 +815,7 @@ TEST(MacroAssemblerNativeSimpleUC16) {
0,
start_adr,
start_adr + input->length(),
- captures,
- true);
+ captures);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, captures[0]);
@@ -839,8 +833,7 @@ TEST(MacroAssemblerNativeSimpleUC16) {
0,
start_adr,
start_adr + input->length() * 2,
- captures,
- true);
+ captures);
CHECK_EQ(NativeRegExpMacroAssembler::FAILURE, result);
}
@@ -877,8 +870,7 @@ TEST(MacroAssemblerNativeBacktrack) {
0,
start_adr,
start_adr + input->length(),
- NULL,
- true);
+ NULL);
CHECK_EQ(NativeRegExpMacroAssembler::FAILURE, result);
}
@@ -920,8 +912,7 @@ TEST(MacroAssemblerNativeBackReferenceASCII) {
0,
start_adr,
start_adr + input->length(),
- output,
- true);
+ output);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, output[0]);
@@ -969,8 +960,7 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
0,
start_adr,
start_adr + input->length() * 2,
- output,
- true);
+ output);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, output[0]);
@@ -1022,8 +1012,7 @@ TEST(MacroAssemblernativeAtStart) {
0,
start_adr,
start_adr + input->length(),
- NULL,
- true);
+ NULL);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
@@ -1032,8 +1021,7 @@ TEST(MacroAssemblernativeAtStart) {
3,
start_adr + 3,
start_adr + input->length(),
- NULL,
- false);
+ NULL);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
}
@@ -1084,8 +1072,7 @@ TEST(MacroAssemblerNativeBackRefNoCase) {
0,
start_adr,
start_adr + input->length(),
- output,
- true);
+ output);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, output[0]);
@@ -1184,8 +1171,7 @@ TEST(MacroAssemblerNativeRegisters) {
0,
start_adr,
start_adr + input->length(),
- output,
- true);
+ output);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, output[0]);
@@ -1225,8 +1211,7 @@ TEST(MacroAssemblerStackOverflow) {
0,
start_adr,
start_adr + input->length(),
- NULL,
- true);
+ NULL);
CHECK_EQ(NativeRegExpMacroAssembler::EXCEPTION, result);
CHECK(Top::has_pending_exception());
@@ -1271,8 +1256,7 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
0,
start_adr,
start_adr + input->length(),
- captures,
- true);
+ captures);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, captures[0]);
@@ -1650,6 +1634,163 @@ TEST(CanonicalizeCharacterSets) {
ASSERT_EQ(30, list->at(0).to());
}
+// Checks whether a character is in the set represented by a list of ranges.
+static bool CharacterInSet(ZoneList<CharacterRange>* set, uc16 value) {
+ for (int i = 0; i < set->length(); i++) {
+ CharacterRange range = set->at(i);
+ if (range.from() <= value && value <= range.to()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+TEST(CharacterRangeMerge) {
+ ZoneScope zone_scope(DELETE_ON_EXIT);
+ ZoneList<CharacterRange> l1(4);
+ ZoneList<CharacterRange> l2(4);
+ // Create all combinations of intersections of ranges, both singletons and
+ // longer.
+
+ int offset = 0;
+
+ // The five kinds of singleton intersections:
+ // X
+ // Y - outside before
+ // Y - outside touching start
+ // Y - overlap
+ // Y - outside touching end
+ // Y - outside after
+
+ for (int i = 0; i < 5; i++) {
+ l1.Add(CharacterRange::Singleton(offset + 2));
+ l2.Add(CharacterRange::Singleton(offset + i));
+ offset += 6;
+ }
+
+ // The seven kinds of singleton/non-singleton intersections:
+ // XXX
+ // Y - outside before
+ // Y - outside touching start
+ // Y - inside touching start
+ // Y - entirely inside
+ // Y - inside touching end
+ // Y - outside touching end
+ // Y - disjoint after
+
+ for (int i = 0; i < 7; i++) {
+ l1.Add(CharacterRange::Range(offset + 2, offset + 4));
+ l2.Add(CharacterRange::Singleton(offset + i));
+ offset += 8;
+ }
+
+ // The eleven kinds of non-singleton intersections:
+ //
+ // XXXXXXXX
+ // YYYY - outside before.
+ // YYYY - outside touching start.
+ // YYYY - overlapping start
+ // YYYY - inside touching start
+ // YYYY - entirely inside
+ // YYYY - inside touching end
+ // YYYY - overlapping end
+ // YYYY - outside touching end
+ // YYYY - outside after
+ // YYYYYYYY - identical
+ // YYYYYYYYYYYY - containing entirely.
+
+ for (int i = 0; i < 9; i++) {
+ l1.Add(CharacterRange::Range(offset + 6, offset + 15)); // Length 8.
+ l2.Add(CharacterRange::Range(offset + 2 * i, offset + 2 * i + 3));
+ offset += 22;
+ }
+ l1.Add(CharacterRange::Range(offset + 6, offset + 15));
+ l2.Add(CharacterRange::Range(offset + 6, offset + 15));
+ offset += 22;
+ l1.Add(CharacterRange::Range(offset + 6, offset + 15));
+ l2.Add(CharacterRange::Range(offset + 4, offset + 17));
+ offset += 22;
+
+ // Different kinds of multi-range overlap:
+ // XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX
+ // YYYY Y YYYY Y YYYY Y YYYY Y YYYY Y YYYY Y
+
+ l1.Add(CharacterRange::Range(offset, offset + 21));
+ l1.Add(CharacterRange::Range(offset + 31, offset + 52));
+ for (int i = 0; i < 6; i++) {
+ l2.Add(CharacterRange::Range(offset + 2, offset + 5));
+ l2.Add(CharacterRange::Singleton(offset + 8));
+ offset += 9;
+ }
+
+ ASSERT(CharacterRange::IsCanonical(&l1));
+ ASSERT(CharacterRange::IsCanonical(&l2));
+
+ ZoneList<CharacterRange> first_only(4);
+ ZoneList<CharacterRange> second_only(4);
+ ZoneList<CharacterRange> both(4);
+
+ // Merge one direction.
+ CharacterRange::Merge(&l1, &l2, &first_only, &second_only, &both);
+
+ CHECK(CharacterRange::IsCanonical(&first_only));
+ CHECK(CharacterRange::IsCanonical(&second_only));
+ CHECK(CharacterRange::IsCanonical(&both));
+
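+ // Exhaustively verify the partition: every code point below offset must
+ // fall into exactly the expected output set.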
+ for (uc16 i = 0; i < offset; i++) {
+ bool in_first = CharacterInSet(&l1, i);
+ bool in_second = CharacterInSet(&l2, i);
+ CHECK((in_first && !in_second) == CharacterInSet(&first_only, i));
+ CHECK((!in_first && in_second) == CharacterInSet(&second_only, i));
+ CHECK((in_first && in_second) == CharacterInSet(&both, i));
+ }
+
+ first_only.Clear();
+ second_only.Clear();
+ both.Clear();
+
+ // Merge other direction.
+ CharacterRange::Merge(&l2, &l1, &second_only, &first_only, &both);
+
+ CHECK(CharacterRange::IsCanonical(&first_only));
+ CHECK(CharacterRange::IsCanonical(&second_only));
+ CHECK(CharacterRange::IsCanonical(&both));
+
+ for (uc16 i = 0; i < offset; i++) {
+ bool in_first = CharacterInSet(&l1, i);
+ bool in_second = CharacterInSet(&l2, i);
+ CHECK((in_first && !in_second) == CharacterInSet(&first_only, i));
+ CHECK((!in_first && in_second) == CharacterInSet(&second_only, i));
+ CHECK((in_first && in_second) == CharacterInSet(&both, i));
+ }
+
+ first_only.Clear();
+ second_only.Clear();
+ both.Clear();
+
+ // Merge but don't record all combinations.
+ CharacterRange::Merge(&l1, &l2, NULL, NULL, &both);
+
+ CHECK(CharacterRange::IsCanonical(&both));
+
+ for (uc16 i = 0; i < offset; i++) {
+ bool in_first = CharacterInSet(&l1, i);
+ bool in_second = CharacterInSet(&l2, i);
+ CHECK((in_first && in_second) == CharacterInSet(&both, i));
+ }
+
+ // Merge into same set.
+ ZoneList<CharacterRange> all(4);
+ CharacterRange::Merge(&l1, &l2, &all, &all, &all);
+
+ CHECK(CharacterRange::IsCanonical(&all));
+
+ for (uc16 i = 0; i < offset; i++) {
+ bool in_first = CharacterInSet(&l1, i);
+ bool in_second = CharacterInSet(&l2, i);
+ CHECK((in_first || in_second) == CharacterInSet(&all, i));
+ }
+}
TEST(Graph) {
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index d02972b83..c34840ace 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2007-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -39,6 +39,8 @@
#include "cctest.h"
#include "spaces.h"
#include "objects.h"
+#include "natives.h"
+#include "bootstrapper.h"
using namespace v8::internal;
@@ -117,10 +119,6 @@ TEST(ExternalReferenceEncoder) {
ExternalReference(&Counters::keyed_load_function_prototype);
CHECK_EQ(make_code(STATS_COUNTER, Counters::k_keyed_load_function_prototype),
encoder.Encode(keyed_load_function_prototype.address()));
- ExternalReference passed_function =
- ExternalReference::builtin_passed_function();
- CHECK_EQ(make_code(UNCLASSIFIED, 1),
- encoder.Encode(passed_function.address()));
ExternalReference the_hole_value_location =
ExternalReference::the_hole_value_location();
CHECK_EQ(make_code(UNCLASSIFIED, 2),
@@ -160,8 +158,6 @@ TEST(ExternalReferenceDecoder) {
decoder.Decode(
make_code(STATS_COUNTER,
Counters::k_keyed_load_function_prototype)));
- CHECK_EQ(ExternalReference::builtin_passed_function().address(),
- decoder.Decode(make_code(UNCLASSIFIED, 1)));
CHECK_EQ(ExternalReference::the_hole_value_location().address(),
decoder.Decode(make_code(UNCLASSIFIED, 2)));
CHECK_EQ(ExternalReference::address_of_stack_limit().address(),
@@ -175,6 +171,75 @@ TEST(ExternalReferenceDecoder) {
}
+class FileByteSink : public SnapshotByteSink {
+ public:
+ explicit FileByteSink(const char* snapshot_file) {
+ fp_ = OS::FOpen(snapshot_file, "wb");
+ file_name_ = snapshot_file;
+ if (fp_ == NULL) {
+ PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
+ exit(1);
+ }
+ }
+ virtual ~FileByteSink() {
+ if (fp_ != NULL) {
+ fclose(fp_);
+ }
+ }
+ virtual void Put(int byte, const char* description) {
+ if (fp_ != NULL) {
+ fputc(byte, fp_);
+ }
+ }
+ virtual int Position() {
+ return ftell(fp_);
+ }
+ void WriteSpaceUsed(
+ int new_space_used,
+ int pointer_space_used,
+ int data_space_used,
+ int code_space_used,
+ int map_space_used,
+ int cell_space_used,
+ int large_space_used);
+
+ private:
+ FILE* fp_;
+ const char* file_name_;
+};
+
+
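+// Records how much was allocated in each space to "<file>.size" so that a
+// later deserialization test can reserve the same amounts up front.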
+void FileByteSink::WriteSpaceUsed(
+ int new_space_used,
+ int pointer_space_used,
+ int data_space_used,
+ int code_space_used,
+ int map_space_used,
+ int cell_space_used,
+ int large_space_used) {
+ int file_name_length = StrLength(file_name_) + 10;
+ Vector<char> name = Vector<char>::New(file_name_length + 1);
+ OS::SNPrintF(name, "%s.size", file_name_);
+ FILE* fp = OS::FOpen(name.start(), "w");
+ fprintf(fp, "new %d\n", new_space_used);
+ fprintf(fp, "pointer %d\n", pointer_space_used);
+ fprintf(fp, "data %d\n", data_space_used);
+ fprintf(fp, "code %d\n", code_space_used);
+ fprintf(fp, "map %d\n", map_space_used);
+ fprintf(fp, "cell %d\n", cell_space_used);
+ fprintf(fp, "large %d\n", large_space_used);
+ fclose(fp);
+}
+
+
+static bool WriteToFile(const char* snapshot_file) {
+ FileByteSink file(snapshot_file);
+ StartupSerializer ser(&file);
+ ser.Serialize();
+ return true;
+}
+
+
static void Serialize() {
// We have to create one context. One reason for this is so that the builtins
// can be loaded from v8natives.js and their addresses can be processed. This
@@ -182,7 +247,7 @@ static void Serialize() {
// that would confuse the serialization/deserialization process.
v8::Persistent<v8::Context> env = v8::Context::New();
env.Dispose();
- Snapshot::WriteToFile(FLAG_testing_serialization_file);
+ WriteToFile(FLAG_testing_serialization_file);
}
@@ -279,45 +344,112 @@ DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
}
-class FileByteSink : public SnapshotByteSink {
- public:
- explicit FileByteSink(const char* snapshot_file) {
- fp_ = OS::FOpen(snapshot_file, "wb");
- if (fp_ == NULL) {
- PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
- exit(1);
- }
- }
- virtual ~FileByteSink() {
- if (fp_ != NULL) {
- fclose(fp_);
+TEST(PartialSerialization) {
+ Serializer::Enable();
+ v8::V8::Initialize();
+
+ v8::Persistent<v8::Context> env = v8::Context::New();
+ ASSERT(!env.IsEmpty());
+ env->Enter();
+ // Make sure all builtin scripts are cached.
+ { HandleScope scope;
+ for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+ Bootstrapper::NativesSourceLookup(i);
}
}
- virtual void Put(int byte, const char* description) {
- if (fp_ != NULL) {
- fputc(byte, fp_);
- }
+ Heap::CollectAllGarbage(true);
+ Heap::CollectAllGarbage(true);
+
+ Object* raw_foo;
+ {
+ v8::HandleScope handle_scope;
+ v8::Local<v8::String> foo = v8::String::New("foo");
+ ASSERT(!foo.IsEmpty());
+ raw_foo = *(v8::Utils::OpenHandle(*foo));
}
- private:
- FILE* fp_;
-};
+ int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+ env->Exit();
+ env.Dispose();
-TEST(PartialSerialization) {
- Serializer::Enable();
- v8::V8::Initialize();
- v8::Persistent<v8::Context> env = v8::Context::New();
- env->Enter();
+ FileByteSink startup_sink(startup_name.start());
+ StartupSerializer startup_serializer(&startup_sink);
+ startup_serializer.SerializeStrongReferences();
+
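+ // The partial snapshot may point into the startup snapshot, so strong
+ // roots are written first and weak references only once it is done.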
+ FileByteSink partial_sink(FLAG_testing_serialization_file);
+ PartialSerializer p_ser(&startup_serializer, &partial_sink);
+ p_ser.Serialize(&raw_foo);
+ startup_serializer.SerializeWeakReferences();
+ partial_sink.WriteSpaceUsed(p_ser.CurrentAllocationAddress(NEW_SPACE),
+ p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
+ p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
+ p_ser.CurrentAllocationAddress(CODE_SPACE),
+ p_ser.CurrentAllocationAddress(MAP_SPACE),
+ p_ser.CurrentAllocationAddress(CELL_SPACE),
+ p_ser.CurrentAllocationAddress(LO_SPACE));
+}
- v8::HandleScope handle_scope;
- v8::Local<v8::String> foo = v8::String::New("foo");
- FileByteSink file(FLAG_testing_serialization_file);
- Serializer ser(&file);
- i::Handle<i::String> internal_foo = v8::Utils::OpenHandle(*foo);
- Object* raw_foo = *internal_foo;
- ser.SerializePartial(&raw_foo);
+DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
+ int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+
+ CHECK(Snapshot::Initialize(startup_name.start()));
+
+ const char* file_name = FLAG_testing_serialization_file;
+ Vector<char> name = Vector<char>::New(file_name_length + 1);
+ OS::SNPrintF(name, "%s.size", file_name);
+ FILE* fp = OS::FOpen(name.start(), "r");
+ int new_size, pointer_size, data_size, code_size, map_size, cell_size;
+ int large_size;
+#ifdef _MSC_VER
+ // Avoid warning about unsafe fscanf from MSVC.
+ // Please note that this is only fine if %c and %s are not being used.
+#define fscanf fscanf_s
+#endif
+ CHECK_EQ(1, fscanf(fp, "new %d\n", &new_size));
+ CHECK_EQ(1, fscanf(fp, "pointer %d\n", &pointer_size));
+ CHECK_EQ(1, fscanf(fp, "data %d\n", &data_size));
+ CHECK_EQ(1, fscanf(fp, "code %d\n", &code_size));
+ CHECK_EQ(1, fscanf(fp, "map %d\n", &map_size));
+ CHECK_EQ(1, fscanf(fp, "cell %d\n", &cell_size));
+ CHECK_EQ(1, fscanf(fp, "large %d\n", &large_size));
+#ifdef _MSC_VER
+#undef fscanf
+#endif
+ fclose(fp);
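+ // Reserve exactly the amounts recorded at serialization time before
+ // deserializing the partial snapshot into them.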
+ Heap::ReserveSpace(new_size,
+ pointer_size,
+ data_size,
+ code_size,
+ map_size,
+ cell_size,
+ large_size);
+ int snapshot_size = 0;
+ byte* snapshot = ReadBytes(file_name, &snapshot_size);
+
+ Object* root;
+ {
+ SnapshotByteSource source(snapshot, snapshot_size);
+ Deserializer deserializer(&source);
+ deserializer.DeserializePartial(&root);
+ CHECK(root->IsString());
+ }
+ v8::HandleScope handle_scope;
+ Handle<Object> root_handle(root);
+
+ Object* root2;
+ {
+ SnapshotByteSource source(snapshot, snapshot_size);
+ Deserializer deserializer(&source);
+ deserializer.DeserializePartial(&root2);
+ CHECK(root2->IsString());
+ CHECK(*root_handle == root2);
+ }
}
diff --git a/deps/v8/test/es5conform/README b/deps/v8/test/es5conform/README
index a88f4a368..9cfc92b4b 100644
--- a/deps/v8/test/es5conform/README
+++ b/deps/v8/test/es5conform/README
@@ -4,7 +4,7 @@ tests from
https://es5conform.svn.codeplex.com/svn
-in revision 59101 as 'data' in this directory. Using later version
+in revision 62998 as 'data' in this directory. Using a later version
may be possible but the tests are only known to pass (and indeed run)
with that revision.
diff --git a/deps/v8/test/es5conform/es5conform.status b/deps/v8/test/es5conform/es5conform.status
index 3fc1e0ad6..a755016e7 100644
--- a/deps/v8/test/es5conform/es5conform.status
+++ b/deps/v8/test/es5conform/es5conform.status
@@ -38,7 +38,6 @@ chapter13: UNIMPLEMENTED
chapter14: UNIMPLEMENTED
chapter15/15.1: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.1: UNIMPLEMENTED
-chapter15/15.2/15.2.3/15.2.3.4: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.5: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.6: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.7: UNIMPLEMENTED
@@ -141,6 +140,117 @@ chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-220: FAIL_OK
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-221: FAIL_OK
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-222: FAIL_OK
+# Object.getOwnPropertyNames
+chapter15/15.2/15.2.3/15.2.3.4: PASS
+
+# All of the tests below marked SUBSETFAIL (in 15.2.3.4) fail because
+# the tests assume that objects cannot have more properties
+# than those described in the spec - but according to the spec they can
+# have additional properties.
+# All compareArray calls in these tests could be exchanged with an
+# isSubsetOfArray call (I will upload a patch to the es5conform site).
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-1: FAIL_OK
+
+# SUBSETFAIL + we do not implement all methods on Object
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-2: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-3: FAIL_OK
+
+# SUBSETFAIL + we do not implement Function.prototype.bind
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-4: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-5: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-6: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-7: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-8: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-9: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-11: FAIL_OK
+
+# We do not implement all methods on RegExp
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-13: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-14: FAIL_OK
+
+# EvalError.prototype does not have a message property
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-15: FAIL_OK
+
+# RangeError.prototype does not have a message property
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-16: FAIL_OK
+
+# ReferenceError.prototype does not have a message property
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-17: FAIL_OK
+
+# SyntaxError.prototype does not have a message property
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-18: FAIL_OK
+
+# TypeError.prototype does not have a message property
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-19: FAIL_OK
+
+# URIError.prototype does not have a message property
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-20: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-22: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-23: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-24: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-25: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-26: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-27: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-28: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-29: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-30: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-31: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-32: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-33: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-34: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-35: FAIL_OK
+
+# getOwnPropertyDescriptor not implemented on array indices
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-b-1: FAIL_OK
+
+
+
# Object.keys
chapter15/15.2/15.2.3/15.2.3.14: PASS
diff --git a/deps/v8/test/mjsunit/compiler/short-circuit.js b/deps/v8/test/mjsunit/compiler/short-circuit.js
new file mode 100644
index 000000000..42100e765
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/short-circuit.js
@@ -0,0 +1,102 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test some expression contexts involving short-circuit boolean
+// operations that did not otherwise have test coverage.
+
+
+var x = 42;
+
+// Literals in value/test context.
+assertEquals(x, function () { return 0 || x }());
+assertEquals(1, function () { return 1 || x }());
+
+// Literals in test/value context.
+assertEquals(0, function () { return 0 && x }());
+assertEquals(x, function () { return 1 && x }());
+
+// A value on top of the stack in value/test context.
+assertEquals(x, function(y) { return y++ || x }(0));
+assertEquals(1, function(y) { return y++ || x }(1));
+
+// A value on top of the stack in a test/value context.
+assertEquals(0, function(y) { return y++ && x }(0));
+assertEquals(x, function(y) { return y++ && x }(1));
+
+// An object literal in value context.
+assertEquals(0, function () { return {x: 0}}().x);
+
+// An object literal in value/test context.
+assertEquals(0, function () { return {x: 0} || this }().x);
+
+// An object literal in test/value context.
+assertEquals(x, function () { return {x: 0} && this }().x);
+
+// An array literal in value/test context.
+assertEquals(0, function () { return [0,1] || new Array(x,1) }()[0]);
+
+// An array literal in test/value context.
+assertEquals(x, function () { return [0,1] && new Array(x,1) }()[0]);
+
+// Slot assignment in value/test context.
+assertEquals(x, function (y) { return (y = 0) || x }("?"));
+assertEquals(1, function (y) { return (y = 1) || x }("?"));
+
+// Slot assignment in test/value context.
+assertEquals(0, function (y) { return (y = 0) && x }("?"));
+assertEquals(x, function (y) { return (y = 1) && x }("?"));
+
+// void in value context.
+assertEquals(void 0, function () { return void x }());
+
+// void in value/test context.
+assertEquals(x, function () { return (void x) || x }());
+
+// void in test/value context.
+assertEquals(void 0, function () { return (void x) && x }());
+
+// Unary not in value context.
+assertEquals(false, function () { return !x }());
+
+// Unary not in value/test context.
+assertEquals(true, function (y) { return !y || x }(0));
+assertEquals(x, function (y) { return !y || x }(1));
+
+// Unary not in test/value context.
+assertEquals(x, function (y) { return !y && x }(0));
+assertEquals(false, function (y) { return !y && x }(1));
+
+// Comparison in value context.
+assertEquals(false, function () { return x < x; }());
+
+// Comparison in value/test context.
+assertEquals(x, function () { return x < x || x; }());
+assertEquals(true, function () { return x <= x || x; }());
+
+// Comparison in test/value context.
+assertEquals(false, function () { return x < x && x; }());
+assertEquals(x, function () { return x <= x && x; }());
diff --git a/deps/v8/test/mjsunit/compiler/thisfunction.js b/deps/v8/test/mjsunit/compiler/thisfunction.js
index 2af846f3e..098fc3a4e 100644
--- a/deps/v8/test/mjsunit/compiler/thisfunction.js
+++ b/deps/v8/test/mjsunit/compiler/thisfunction.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --always_fast_compiler
+// Flags: --always-full-compiler
// Test reference to this-function.
diff --git a/deps/v8/test/mjsunit/compiler/unary-add.js b/deps/v8/test/mjsunit/compiler/unary-add.js
new file mode 100644
index 000000000..b1fc0c2ca
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/unary-add.js
@@ -0,0 +1,67 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test unary addition in various contexts.
+
+// Test value context.
+assertEquals(1, +'1');
+assertEquals(1, +1);
+assertEquals(1.12, +1.12);
+assertEquals(NaN, +undefined);
+assertEquals(NaN, +{});
+
+// Test effect context.
+assertEquals(1, eval("+'1'; 1"));
+assertEquals(1, eval("+1; 1"));
+assertEquals(1, eval("+1.12; 1"));
+assertEquals(1, eval("+undefined; 1"));
+assertEquals(1, eval("+{}; 1"));
+
+// Test test context.
+assertEquals(1, (+'1') ? 1 : 2);
+assertEquals(1, (+1) ? 1 : 2);
+assertEquals(1, (+'0') ? 2 : 1);
+assertEquals(1, (+0) ? 2 : 1);
+assertEquals(1, (+1.12) ? 1 : 2);
+assertEquals(1, (+undefined) ? 2 : 1);
+assertEquals(1, (+{}) ? 2 : 1);
+
+// Test value/test context.
+assertEquals(1, +'1' || 2);
+assertEquals(1, +1 || 2);
+assertEquals(1.12, +1.12 || 2);
+assertEquals(2, +undefined || 2);
+assertEquals(2, +{} || 2);
+
+// Test test/value context.
+assertEquals(2, +'1' && 2);
+assertEquals(2, +1 && 2);
+assertEquals(0, +'0' && 2);
+assertEquals(0, +0 && 2);
+assertEquals(2, +1.12 && 2);
+assertEquals(NaN, +undefined && 2);
+assertEquals(NaN, +{} && 2);
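
Unary plus is ToNumber, which explains the NaN rows: +undefined is NaN directly, and +{} coerces through toString to "[object Object]", which does not parse as a number. Since NaN is falsy, the && rows short-circuit and yield NaN itself. A sketch of the coercion:

assertEquals("[object Object]", String({}));   // what +{} attempts to parse
assertEquals(NaN, Number("[object Object]"));  // hence +{} evaluates to NaN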
diff --git a/deps/v8/test/mjsunit/debug-compile-event-newfunction.js b/deps/v8/test/mjsunit/debug-compile-event-newfunction.js
new file mode 100644
index 000000000..fb43a87f7
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-compile-event-newfunction.js
@@ -0,0 +1,68 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null; // Exception in debug event listener.
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.AfterCompile) {
+ assertEquals(Debug.ScriptCompilationType.Eval,
+ event_data.script().compilationType(),
+ 'Wrong compilationType');
+ var evalFromScript = event_data.script().evalFromScript();
+ assertTrue(!!evalFromScript, ' evalFromScript ');
+ assertFalse(evalFromScript.isUndefined(), 'evalFromScript.isUndefined()');
+ assertTrue(/debug-compile-event-newfunction.js$/.test(
+ evalFromScript.name()),
+ 'Wrong eval from script name.');
+
+ var evalFromLocation = event_data.script().evalFromLocation();
+ assertTrue(!!evalFromLocation, 'evalFromLocation is undefined');
+ assertEquals(63, evalFromLocation.line);
+
+ // Check that the event can be serialized without exceptions.
+ var json = event_data.toJSONProtocol();
+ }
+ } catch (e) {
+ exception = e
+ }
+};
+
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+// Create a function from its body text. It will lead to an eval.
+new Function('arg1', 'return arg1 + 1;');
+
+assertNull(exception, "exception in listener");
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-compile-event.js b/deps/v8/test/mjsunit/debug-compile-event.js
index 4804ac772..071183bf6 100644
--- a/deps/v8/test/mjsunit/debug-compile-event.js
+++ b/deps/v8/test/mjsunit/debug-compile-event.js
@@ -107,7 +107,7 @@ compileSource('eval("a=2")');
source_count++; // Using eval causes additional compilation event.
compileSource('eval("eval(\'(function(){return a;})\')")');
source_count += 2; // Using eval causes additional compilation event.
-compileSource('JSON.parse("{a:1,b:2}")');
+compileSource('JSON.parse(\'{"a":1,"b":2}\')');
source_count++; // Using JSON.parse causes additional compilation event.
// Make sure that the debug event listener was invoked.
diff --git a/deps/v8/test/mjsunit/debug-step.js b/deps/v8/test/mjsunit/debug-step.js
index 453421864..a887514a0 100644
--- a/deps/v8/test/mjsunit/debug-step.js
+++ b/deps/v8/test/mjsunit/debug-step.js
@@ -79,4 +79,4 @@ f();
assertEquals(0, result);
// Get rid of the debug event listener.
-Debug.setListener(null); \ No newline at end of file
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/bugs/bug-223.js b/deps/v8/test/mjsunit/for.js
index 04b296b9b..0b7158086 100644
--- a/deps/v8/test/mjsunit/bugs/bug-223.js
+++ b/deps/v8/test/mjsunit/for.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,15 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// When calling user-defined functions on strings, booleans or
-// numbers, we should create a wrapper object.
-
-function TypeOfThis() { return typeof this; }
-
-String.prototype.TypeOfThis = TypeOfThis;
-Boolean.prototype.TypeOfThis = TypeOfThis;
-Number.prototype.TypeOfThis = TypeOfThis;
-
-assertEquals('object', 'xxx'.TypeOfThis());
-assertEquals('object', true.TypeOfThis());
-assertEquals('object', (42).TypeOfThis());
+// Test missing condition in for loop.
+for (var i = 0; ; i++) {
+ if (i > 100) break;
+}
+assertEquals(101, i);
diff --git a/deps/v8/test/mjsunit/json.js b/deps/v8/test/mjsunit/json.js
index 35e16340c..56562e769 100644
--- a/deps/v8/test/mjsunit/json.js
+++ b/deps/v8/test/mjsunit/json.js
@@ -93,20 +93,46 @@ for (var p in this)
assertFalse(p == "JSON");
// Parse
-
assertEquals({}, JSON.parse("{}"));
+assertEquals({42:37}, JSON.parse('{"42":37}'));
assertEquals(null, JSON.parse("null"));
assertEquals(true, JSON.parse("true"));
assertEquals(false, JSON.parse("false"));
assertEquals("foo", JSON.parse('"foo"'));
assertEquals("f\no", JSON.parse('"f\\no"'));
+assertEquals("\b\f\n\r\t\"\u2028\/\\",
+ JSON.parse('"\\b\\f\\n\\r\\t\\"\\u2028\\/\\\\"'));
+assertEquals([1.1], JSON.parse("[1.1]"));
+assertEquals([1], JSON.parse("[1.0]"));
+
+assertEquals(0, JSON.parse("0"));
+assertEquals(1, JSON.parse("1"));
+assertEquals(0.1, JSON.parse("0.1"));
assertEquals(1.1, JSON.parse("1.1"));
-assertEquals(1, JSON.parse("1.0"));
-assertEquals(0.0000000003, JSON.parse("3e-10"));
+assertEquals(1.1, JSON.parse("1.100000"));
+assertEquals(1.111111, JSON.parse("1.111111"));
+assertEquals(-0, JSON.parse("-0"));
+assertEquals(-1, JSON.parse("-1"));
+assertEquals(-0.1, JSON.parse("-0.1"));
+assertEquals(-1.1, JSON.parse("-1.1"));
+assertEquals(-1.1, JSON.parse("-1.100000"));
+assertEquals(-1.111111, JSON.parse("-1.111111"));
+assertEquals(11, JSON.parse("1.1e1"));
+assertEquals(11, JSON.parse("1.1e+1"));
+assertEquals(0.11, JSON.parse("1.1e-1"));
+assertEquals(11, JSON.parse("1.1E1"));
+assertEquals(11, JSON.parse("1.1E+1"));
+assertEquals(0.11, JSON.parse("1.1E-1"));
+
assertEquals([], JSON.parse("[]"));
assertEquals([1], JSON.parse("[1]"));
assertEquals([1, "2", true, null], JSON.parse('[1, "2", true, null]'));
+assertEquals("", JSON.parse('""'));
+assertEquals(["", "", -0, ""], JSON.parse('[ "" , "" , -0, ""]'));
+assertEquals("", JSON.parse('""'));
+
+
function GetFilter(name) {
function Filter(key, value) {
return (key == name) ? undefined : value;
@@ -145,6 +171,64 @@ TestInvalid('function () { return 0; }');
TestInvalid("[1, 2");
TestInvalid('{"x": 3');
+// JavaScript number literals not valid in JSON.
+TestInvalid('[01]');
+TestInvalid('[.1]');
+TestInvalid('[1.]');
+TestInvalid('[1.e1]');
+TestInvalid('[-.1]');
+TestInvalid('[-1.]');
+
+// Plain invalid number literals.
+TestInvalid('-');
+TestInvalid('--1');
+TestInvalid('-1e');
+TestInvalid('1e--1]');
+TestInvalid('1e+-1');
+TestInvalid('1e-+1');
+TestInvalid('1e++1');
+
+// JavaScript string literals not valid in JSON.
+TestInvalid("'single quote'"); // Valid JavaScript
+TestInvalid('"\\a invalid escape"');
+TestInvalid('"\\v invalid escape"'); // Valid JavaScript
+TestInvalid('"\\\' invalid escape"'); // Valid JavaScript
+TestInvalid('"\\x42 invalid escape"'); // Valid JavaScript
+TestInvalid('"\\u202 invalid escape"');
+TestInvalid('"\\012 invalid escape"');
+TestInvalid('"Unterminated string');
+TestInvalid('"Unterminated string\\"');
+TestInvalid('"Unterminated string\\\\\\"');
+
+// Test bad JSON that would be good JavaScript (ES5).
+
+TestInvalid("{true:42}");
+TestInvalid("{false:42}");
+TestInvalid("{null:42}");
+TestInvalid("{'foo':42}");
+TestInvalid("{42:42}");
+TestInvalid("{0:42}");
+TestInvalid("{-1:42}");
+
+// Test for trailing garbage detection.
+
+TestInvalid('42 px');
+TestInvalid('42 .2');
+TestInvalid('42 2');
+TestInvalid('42 e1');
+TestInvalid('"42" ""');
+TestInvalid('"42" ""');
+TestInvalid('"" ""');
+TestInvalid('true ""');
+TestInvalid('false ""');
+TestInvalid('null ""');
+TestInvalid('null ""');
+TestInvalid('[] ""');
+TestInvalid('[true] ""');
+TestInvalid('{} ""');
+TestInvalid('{"x":true} ""');
+TestInvalid('"Garbage""After string"');
+
// Stringify
assertEquals("true", JSON.stringify(true));
@@ -196,12 +280,8 @@ assertEquals('{"y":6,"x":5}', JSON.stringify({x:5,y:6}, ['y', 'x']));
assertEquals(undefined, JSON.stringify(undefined));
assertEquals(undefined, JSON.stringify(function () { }));
-function checkIllegal(str) {
- assertThrows(function () { JSON.parse(str); }, SyntaxError);
-}
-
-checkIllegal('1); throw "foo"; (1');
+TestInvalid('1); throw "foo"; (1');
var x = 0;
eval("(1); x++; (1)");
-checkIllegal('1); x++; (1');
+TestInvalid('1); x++; (1');
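
TestInvalid is defined earlier in json.js; judging from the checkIllegal helper it replaces here, a minimal equivalent sketch would be:

function TestInvalid(str) {
  assertThrows(function () { JSON.parse(str); }, SyntaxError);
}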
diff --git a/deps/v8/test/mjsunit/math-round.js b/deps/v8/test/mjsunit/math-round.js
new file mode 100644
index 000000000..d80a1036f
--- /dev/null
+++ b/deps/v8/test/mjsunit/math-round.js
@@ -0,0 +1,52 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+assertEquals(0, Math.round(0));
+assertEquals(-0, Math.round(-0));
+assertEquals(Infinity, Math.round(Infinity));
+assertEquals(-Infinity, Math.round(-Infinity));
+assertNaN(Math.round(NaN));
+
+assertEquals(1, Math.round(0.5));
+assertEquals(1, Math.round(0.7));
+assertEquals(1, Math.round(1));
+assertEquals(1, Math.round(1.1));
+assertEquals(1, Math.round(1.49999));
+assertEquals(1/-0, 1/Math.round(-0.5)); // Test for -0 result.
+assertEquals(-1, Math.round(-0.5000000000000001));
+assertEquals(-1, Math.round(-0.7));
+assertEquals(-1, Math.round(-1));
+assertEquals(-1, Math.round(-1.1));
+assertEquals(-1, Math.round(-1.49999));
+assertEquals(-1, Math.round(-1.5));
+
+assertEquals(9007199254740990, Math.round(9007199254740990));
+assertEquals(9007199254740991, Math.round(9007199254740991));
+assertEquals(-9007199254740990, Math.round(-9007199254740990));
+assertEquals(-9007199254740991, Math.round(-9007199254740991));
+assertEquals(Number.MAX_VALUE, Math.round(Number.MAX_VALUE));
+assertEquals(-Number.MAX_VALUE, Math.round(-Number.MAX_VALUE));
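
The 1/Math.round(-0.5) line above is the standard trick for detecting a -0 result: assertEquals(-0, 0) would pass since -0 === 0, but dividing exposes the sign of zero:

assertEquals(-Infinity, 1 / -0);  // negative zero divides to -Infinity
assertEquals(Infinity, 1 / 0);    // positive zero divides to +Infinity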
diff --git a/deps/v8/test/mjsunit/mirror-script.js b/deps/v8/test/mjsunit/mirror-script.js
index 3208f16c3..8631028e4 100644
--- a/deps/v8/test/mjsunit/mirror-script.js
+++ b/deps/v8/test/mjsunit/mirror-script.js
@@ -87,8 +87,8 @@ testScriptMirror(function(){}, 'mirror-script.js', 100, 2, 0);
testScriptMirror(Math.sin, 'native math.js', -1, 0, 0);
testScriptMirror(eval('(function(){})'), null, 1, 2, 1, '(function(){})', 87);
testScriptMirror(eval('(function(){\n })'), null, 2, 2, 1, '(function(){\n })', 88);
-testScriptMirror(%CompileString("({a:1,b:2})", true), null, 1, 2, 2, '({a:1,b:2})');
-testScriptMirror(%CompileString("({a:1,\n b:2})", true), null, 2, 2, 2, '({a:1,\n b:2})');
+testScriptMirror(%CompileString('{"a":1,"b":2}', true), null, 1, 2, 2, '{"a":1,"b":2}');
+testScriptMirror(%CompileString('{"a":1,\n "b":2}', true), null, 2, 2, 2, '{"a":1,\n "b":2}');
// Test taking slices of source.
var mirror = debug.MakeMirror(eval('(function(){\n 1;\n})')).script();
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 8ced0119f..07c4e7eff 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -75,6 +75,7 @@ function deepEquals(a, b) {
if (typeof a == "number" && typeof b == "number" && isNaN(a) && isNaN(b)) {
return true;
}
+ if (a == null || b == null) return false;
if (a.constructor === RegExp || b.constructor === RegExp) {
return (a.constructor === b.constructor) && (a.toString === b.toString);
}
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 41388a37f..f1752b9f5 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -45,8 +45,8 @@ array-constructor: PASS || TIMEOUT
# Very slow on ARM, contains no architecture dependent code.
unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm)
-# Skip long running test in debug.
-regress/regress-524: PASS, SKIP if $mode == debug
+# Skip long running test in debug and allow it to timeout in release mode.
+regress/regress-524: (PASS || TIMEOUT), SKIP if $mode == debug
[ $arch == arm ]
diff --git a/deps/v8/test/mjsunit/object-get-own-property-names.js b/deps/v8/test/mjsunit/object-get-own-property-names.js
new file mode 100644
index 000000000..f52cee2f9
--- /dev/null
+++ b/deps/v8/test/mjsunit/object-get-own-property-names.js
@@ -0,0 +1,104 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test ES5 section 15.2.3.4 Object.getOwnPropertyNames.
+
+// Check simple cases.
+var obj = { a: 1, b: 2};
+var propertyNames = Object.getOwnPropertyNames(obj);
+propertyNames.sort();
+assertEquals(2, propertyNames.length);
+assertEquals("a", propertyNames[0]);
+assertEquals("b", propertyNames[1]);
+
+var obj = { a: function(){}, b: function(){} };
+var propertyNames = Object.getOwnPropertyNames(obj);
+propertyNames.sort();
+assertEquals(2, propertyNames.length);
+assertEquals("a", propertyNames[0]);
+assertEquals("b", propertyNames[1]);
+
+// Check slow case
+var obj = { a: 1, b: 2, c: 3 };
+delete obj.b;
+var propertyNames = Object.getOwnPropertyNames(obj)
+propertyNames.sort();
+assertEquals(2, propertyNames.length);
+assertEquals("a", propertyNames[0]);
+assertEquals("c", propertyNames[1]);
+
+// Check that non-enumerable properties are being returned.
+var propertyNames = Object.getOwnPropertyNames([1, 2]);
+propertyNames.sort();
+assertEquals(3, propertyNames.length);
+assertEquals("0", propertyNames[0]);
+assertEquals("1", propertyNames[1]);
+assertEquals("length", propertyNames[2]);
+
+// Check that no proto properties are returned.
+var obj = { foo: "foo" };
+obj.__proto__ = { bar: "bar" };
+propertyNames = Object.getOwnPropertyNames(obj);
+propertyNames.sort();
+assertEquals(1, propertyNames.length);
+assertEquals("foo", propertyNames[0]);
+
+// Check that getter properties are returned.
+var obj = {};
+obj.__defineGetter__("getter", function() {});
+propertyNames = Object.getOwnPropertyNames(obj);
+propertyNames.sort();
+assertEquals(1, propertyNames.length);
+assertEquals("getter", propertyNames[0]);
+
+try {
+ Object.getOwnPropertyNames(4);
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/on non-object/.test(e));
+}
+
+try {
+ Object.getOwnPropertyNames("foo");
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/on non-object/.test(e));
+}
+
+try {
+ Object.getOwnPropertyNames(undefined);
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/on non-object/.test(e));
+}
+
+try {
+ Object.getOwnPropertyNames(null);
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/on non-object/.test(e));
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-580.js b/deps/v8/test/mjsunit/regress/regress-580.js
new file mode 100644
index 000000000..c6b3db7ad
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-580.js
@@ -0,0 +1,55 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test constant folding of smi operations that overflow a 32-bit int.
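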
+// See http://code.google.com/p/v8/issues/detail?id=580
+
+function num_ops() {
+ var x;
+ var tmp = 0;
+ x = (tmp = 1578221999, tmp)+(tmp = 572285336, tmp);
+ assertEquals(2150507335, x);
+ x = 1578221999 + 572285336;
+ assertEquals(2150507335, x);
+
+ x = (tmp = -1500000000, tmp)+(tmp = -2000000000, tmp);
+ assertEquals(-3500000000, x);
+ x = -1500000000 + -2000000000;
+ assertEquals(-3500000000, x);
+
+ x = (tmp = 1578221999, tmp)-(tmp = -572285336, tmp);
+ assertEquals(2150507335, x);
+ x = 1578221999 - -572285336;
+ assertEquals(2150507335, x);
+
+ x = (tmp = -1500000000, tmp)-(tmp = 2000000000, tmp);
+ assertEquals(-3500000000, x);
+ x = -1500000000 - 2000000000;
+ assertEquals(-3500000000, x);
+}
+
+num_ops();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-3184.js b/deps/v8/test/mjsunit/regress/regress-crbug-3184.js
new file mode 100644
index 000000000..ed78183f7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-3184.js
@@ -0,0 +1,83 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Object.extend = function (dest, source) {
+ for (property in source) dest[property] = source[property];
+ return dest;
+};
+
+Object.extend ( Function.prototype,
+{
+ wrap : function (wrapper) {
+ var method = this;
+ var bmethod = (function(_method) {
+ return function () {
+ this.$$$parentMethodStore$$$ = this.$proceed;
+ this.$proceed = function() { return _method.apply(this, arguments); };
+ };
+ })(method);
+ var amethod = function () {
+ this.$proceed = this.$$$parentMethodStore$$$;
+ if (this.$proceed == undefined) delete this.$proceed;
+ delete this.$$$parentMethodStore$$$;
+ };
+ var value = function() { bmethod.call(this); retval = wrapper.apply(this, arguments); amethod.call(this); return retval; };
+ return value;
+ }
+});
+
+String.prototype.cap = function() {
+ return this.charAt(0).toUpperCase() + this.substring(1).toLowerCase();
+};
+
+String.prototype.cap = String.prototype.cap.wrap(
+ function(each) {
+ if (each && this.indexOf(" ") != -1) {
+ return this.split(" ").map(
+ function (value) {
+ return value.cap();
+ }
+ ).join(" ");
+ } else {
+ return this.$proceed();
+ }
+});
+
+Object.extend( Array.prototype,
+{
+ map : function(fun) {
+ if (typeof fun != "function") throw new TypeError();
+ var len = this.length;
+ var res = new Array(len);
+ var thisp = arguments[1];
+ for (var i = 0; i < len; i++) { if (i in this) res[i] = fun.call(thisp, this[i], i, this); }
+ return res;
+ }
+});
+assertEquals("Test1 test1", "test1 test1".cap());
+assertEquals("Test2 Test2", "test2 test2".cap(true));
+
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-3867.js b/deps/v8/test/mjsunit/regress/regress-crbug-3867.js
new file mode 100644
index 000000000..03001b6c7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-3867.js
@@ -0,0 +1,77 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function props(x) {
+ var result = [];
+ for (var p in x) result.push(p);
+ return result;
+}
+
+function A() {
+ this.a1 = 1234;
+ this.a2 = "D";
+ this.a3 = false;
+}
+
+function B() {
+ this.b3 = false;
+ this.b2 = "D";
+ this.b1 = 1234;
+}
+
+function C() {
+ this.c3 = false;
+ this.c1 = 1234;
+ this.c2 = "D";
+}
+
+assertArrayEquals(["a1", "a2", "a3"], props(new A()));
+assertArrayEquals(["b3", "b2", "b1"], props(new B()));
+assertArrayEquals(["c3", "c1", "c2"], props(new C()));
+assertArrayEquals(["s1", "s2", "s3"], props({s1: 0, s2: 0, s3: 0}));
+assertArrayEquals(["s3", "s2", "s1"], props({s3: 0, s2: 0, s1: 0}));
+assertArrayEquals(["s3", "s1", "s2"], props({s3: 0, s1: 0, s2: 0}));
+
+var a = new A()
+a.a0 = 0;
+a.a4 = 0;
+assertArrayEquals(["a1", "a2", "a3", "a0", "a4"], props(a));
+
+var b = new B()
+b.b4 = 0;
+b.b0 = 0;
+assertArrayEquals(["b3", "b2", "b1", "b4", "b0"], props(b));
+
+var o1 = {s1: 0, s2: 0, s3: 0}
+o1.s0 = 0;
+o1.s4 = 0;
+assertArrayEquals(["s1", "s2", "s3", "s0", "s4"], props(o1));
+
+var o2 = {s3: 0, s2: 0, s1: 0}
+o2.s4 = 0;
+o2.s0 = 0;
+assertArrayEquals(["s3", "s2", "s1", "s4", "s0"], props(o2));
diff --git a/deps/v8/test/mjsunit/tools/csvparser.js b/deps/v8/test/mjsunit/tools/csvparser.js
index db3a2eba3..6ac490805 100644
--- a/deps/v8/test/mjsunit/tools/csvparser.js
+++ b/deps/v8/test/mjsunit/tools/csvparser.js
@@ -77,3 +77,7 @@ assertEquals(
assertEquals(
['code-creation','RegExp','0xf6c21c00','826','NccyrJroXvg\\/([^,]*)'],
parser.parseLine('code-creation,RegExp,0xf6c21c00,826,"NccyrJroXvg\\/([^,]*)"'));
+
+assertEquals(
+ ['code-creation','Function','0x42f0a0','163',''],
+ parser.parseLine('code-creation,Function,0x42f0a0,163,""'));
diff --git a/deps/v8/test/mjsunit/tools/logreader.js b/deps/v8/test/mjsunit/tools/logreader.js
index 8ed5ffd26..8b7478951 100644
--- a/deps/v8/test/mjsunit/tools/logreader.js
+++ b/deps/v8/test/mjsunit/tools/logreader.js
@@ -67,7 +67,7 @@
var reader = new devtools.profiler.LogReader({});
assertEquals([0x10000000, 0x10001000, 0xffff000, 0x10000000],
- reader.processStack(0x10000000, ['overflow',
+ reader.processStack(0x10000000, 0, ['overflow',
'+1000', '-2000', '+1000']));
})();
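
The '+1000'/'-2000' entries are compressed frames: each is a hex offset relative to the previous frame's address, while the leading 'overflow' marker carries no address and is skipped. Working the expected array through, assuming that encoding:

// 0x10000000                          pc (first element)
// 0x10000000 + 0x1000 = 0x10001000    '+1000'
// 0x10001000 - 0x2000 = 0x0ffff000    '-2000'
// 0x0ffff000 + 0x1000 = 0x10000000    '+1000'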
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test.func-info b/deps/v8/test/mjsunit/tools/tickprocessor-test.func-info
new file mode 100644
index 000000000..a66b90f4c
--- /dev/null
+++ b/deps/v8/test/mjsunit/tools/tickprocessor-test.func-info
@@ -0,0 +1,29 @@
+Statistical profiling result from v8.log, (3 ticks, 0 unaccounted, 0 excluded).
+
+ [Shared libraries]:
+ ticks total nonlib name
+
+ [JavaScript]:
+ ticks total nonlib name
+ 2 66.7% 66.7% Stub: CompareStub_GE
+ 1 33.3% 33.3% LazyCompile: DrawLine 3d-cube.js:17
+
+ [C++]:
+ ticks total nonlib name
+
+ [GC]:
+ ticks total nonlib name
+ 0 0.0%
+
+ [Bottom up (heavy) profile]:
+ Note: percentage shows a share of a particular caller in the total
+ amount of its parent calls.
+ Callers occupying less than 2.0% are not shown.
+
+ ticks parent name
+ 2 66.7% Stub: CompareStub_GE
+ 2 100.0% LazyCompile: DrawLine 3d-cube.js:17
+ 2 100.0% LazyCompile: DrawQube 3d-cube.js:188
+
+ 1 33.3% LazyCompile: DrawLine 3d-cube.js:17
+ 1 100.0% LazyCompile: DrawQube 3d-cube.js:188
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test.log b/deps/v8/test/mjsunit/tools/tickprocessor-test.log
index 75daad6b2..80e7ec1a8 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor-test.log
+++ b/deps/v8/test/mjsunit/tools/tickprocessor-test.log
@@ -6,19 +6,20 @@ code-creation,Stub,0xf540a100,474,"CEntryStub"
code-creation,Script,0xf541cd80,736,"exp.js"
code-creation,Stub,0xf541d0e0,47,"RuntimeStub_Math_exp"
code-creation,LazyCompile,0xf541d120,145,"exp native math.js:41"
+function-creation,0xf441d280,0xf541d120
code-creation,LoadIC,0xf541d280,117,"j"
code-creation,LoadIC,0xf541d360,63,"i"
-tick,0x80f82d1,0xffdfe880,0,0xf541ce5c
-tick,0x80f89a1,0xffdfecf0,0,0xf541ce5c
-tick,0x8123b5c,0xffdff1a0,0,0xf541d1a1,0xf541ceea
-tick,0x8123b65,0xffdff1a0,0,0xf541d1a1,0xf541ceea
-tick,0xf541d2be,0xffdff1e4,0
-tick,0xf541d320,0xffdff1dc,0
-tick,0xf541d384,0xffdff1d8,0
-tick,0xf7db94da,0xffdff0ec,0,0xf541d1a1,0xf541ceea
-tick,0xf7db951c,0xffdff0f0,0,0xf541d1a1,0xf541ceea
-tick,0xf7dbc508,0xffdff14c,0,0xf541d1a1,0xf541ceea
-tick,0xf7dbff21,0xffdff198,0,0xf541d1a1,0xf541ceea
-tick,0xf7edec90,0xffdff0ec,0,0xf541d1a1,0xf541ceea
-tick,0xffffe402,0xffdff488,0
+tick,0x80f82d1,0xffdfe880,0,0,0xf541ce5c
+tick,0x80f89a1,0xffdfecf0,0,0,0xf541ce5c
+tick,0x8123b5c,0xffdff1a0,0,0,0xf541d1a1,0xf541ceea
+tick,0x8123b65,0xffdff1a0,0,0,0xf541d1a1,0xf541ceea
+tick,0xf541d2be,0xffdff1e4,0,0
+tick,0xf541d320,0xffdff1dc,0,0
+tick,0xf541d384,0xffdff1d8,0,0
+tick,0xf7db94da,0xffdff0ec,0,0,0xf541d1a1,0xf541ceea
+tick,0xf7db951c,0xffdff0f0,0,0,0xf541d1a1,0xf541ceea
+tick,0xf7dbc508,0xffdff14c,0,0,0xf541d1a1,0xf541ceea
+tick,0xf7dbff21,0xffdff198,0,0,0xf541d1a1,0xf541ceea
+tick,0xf7edec90,0xffdff0ec,0,0,0xf541d1a1,0xf541ceea
+tick,0xffffe402,0xffdff488,0,0
profiler,"end"
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor.js b/deps/v8/test/mjsunit/tools/tickprocessor.js
index 83bdac8ab..abcde897e 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor.js
+++ b/deps/v8/test/mjsunit/tools/tickprocessor.js
@@ -334,7 +334,7 @@ function PrintMonitor(outputOrFileName) {
print = function(str) {
var strSplit = str.split('\n');
for (var i = 0; i < strSplit.length; ++i) {
- s = strSplit[i];
+ var s = strSplit[i];
realOut.push(s);
if (outputPos < expectedOut.length) {
if (expectedOut[outputPos] != s) {
@@ -400,7 +400,10 @@ function driveTickProcessorTest(
'tickprocessor-test.log', 'tickprocessor-test.ignore-unknown'],
'GcState': [
false, false, TickProcessor.VmStates.GC,
- 'tickprocessor-test.log', 'tickprocessor-test.gc-state']
+ 'tickprocessor-test.log', 'tickprocessor-test.gc-state'],
+ 'FunctionInfo': [
+ false, false, null,
+ 'tickprocessor-test-func-info.log', 'tickprocessor-test.func-info']
};
for (var testName in testData) {
print('=== testProcessing-' + testName + ' ===');
diff --git a/deps/v8/test/mjsunit/value-wrapper.js b/deps/v8/test/mjsunit/value-wrapper.js
new file mode 100644
index 000000000..88330b449
--- /dev/null
+++ b/deps/v8/test/mjsunit/value-wrapper.js
@@ -0,0 +1,164 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// When calling user-defined functions on strings, booleans or
+// numbers, we should create a wrapper object.
+
+// When running the tests, use loops to ensure that the call site moves through
+// the different IC states and that both the runtime system and the generated
+// IC code are tested.
+function RunTests() {
+ for (var i = 0; i < 10; i++) {
+ assertEquals('object', 'xxx'.TypeOfThis());
+ assertEquals('object', true.TypeOfThis(2,3));
+ assertEquals('object', false.TypeOfThis());
+ assertEquals('object', (42).TypeOfThis());
+ assertEquals('object', (3.14).TypeOfThis());
+ }
+
+ for (var i = 0; i < 10; i++) {
+ assertEquals('object', 'xxx'['TypeOfThis']());
+ assertEquals('object', true['TypeOfThis']());
+ assertEquals('object', false['TypeOfThis']());
+ assertEquals('object', (42)['TypeOfThis']());
+ assertEquals('object', (3.14)['TypeOfThis']());
+ }
+
+ function CallTypeOfThis(obj) {
+ assertEquals('object', obj.TypeOfThis());
+ }
+
+ for (var i = 0; i < 10; i++) {
+ CallTypeOfThis('xxx');
+ CallTypeOfThis(true);
+ CallTypeOfThis(false);
+ CallTypeOfThis(42);
+ CallTypeOfThis(3.14);
+ }
+
+ function TestWithWith(obj) {
+ with (obj) {
+ for (var i = 0; i < 10; i++) {
+ assertEquals('object', TypeOfThis());
+ }
+ }
+ }
+
+ TestWithWith('xxx');
+ TestWithWith(true);
+ TestWithWith(false);
+ TestWithWith(42);
+ TestWithWith(3.14);
+
+ for (var i = 0; i < 10; i++) {
+ assertEquals('object', true[7]());
+ assertEquals('object', false[7]());
+ assertEquals('object', (42)[7]());
+ assertEquals('object', (3.14)[7]());
+ }
+
+ for (var i = 0; i < 10; i++) {
+ assertEquals('object', typeof 'xxx'.ObjectValueOf());
+ assertEquals('object', typeof true.ObjectValueOf());
+ assertEquals('object', typeof false.ObjectValueOf());
+ assertEquals('object', typeof (42).ObjectValueOf());
+ assertEquals('object', typeof (3.14).ObjectValueOf());
+ }
+
+ for (var i = 0; i < 10; i++) {
+ assertEquals('[object String]', 'xxx'.ObjectToString());
+ assertEquals('[object Boolean]', true.ObjectToString());
+ assertEquals('[object Boolean]', false.ObjectToString());
+ assertEquals('[object Number]', (42).ObjectToString());
+ assertEquals('[object Number]', (3.14).ObjectToString());
+ }
+}
+
+function TypeOfThis() { return typeof this; }
+
+// Test with normal setup of prototype.
+String.prototype.TypeOfThis = TypeOfThis;
+Boolean.prototype.TypeOfThis = TypeOfThis;
+Number.prototype.TypeOfThis = TypeOfThis;
+Boolean.prototype[7] = TypeOfThis;
+Number.prototype[7] = TypeOfThis;
+
+String.prototype.ObjectValueOf = Object.prototype.valueOf;
+Boolean.prototype.ObjectValueOf = Object.prototype.valueOf;
+Number.prototype.ObjectValueOf = Object.prototype.valueOf;
+
+String.prototype.ObjectToString = Object.prototype.toString;
+Boolean.prototype.ObjectToString = Object.prototype.toString;
+Number.prototype.ObjectToString = Object.prototype.toString;
+
+RunTests();
+
+// Run test after properties have been set to a different value.
+String.prototype.TypeOfThis = 'x';
+Boolean.prototype.TypeOfThis = 'x';
+Number.prototype.TypeOfThis = 'x';
+Boolean.prototype[7] = 'x';
+Number.prototype[7] = 'x';
+
+String.prototype.TypeOfThis = TypeOfThis;
+Boolean.prototype.TypeOfThis = TypeOfThis;
+Number.prototype.TypeOfThis = TypeOfThis;
+Boolean.prototype[7] = TypeOfThis;
+Number.prototype[7] = TypeOfThis;
+
+RunTests();
+
+// Force the prototype into slow case and run the test again.
+delete String.prototype.TypeOfThis;
+delete Boolean.prototype.TypeOfThis;
+delete Number.prototype.TypeOfThis;
+Boolean.prototype[7];
+Number.prototype[7];
+
+String.prototype.TypeOfThis = TypeOfThis;
+Boolean.prototype.TypeOfThis = TypeOfThis;
+Number.prototype.TypeOfThis = TypeOfThis;
+Boolean.prototype[7] = TypeOfThis;
+Number.prototype[7] = TypeOfThis;
+
+RunTests();
+
+// According to ES3 15.3.4.3 the this value passed to Function.prototype.apply
+// should be wrapped. According to ES5 it should not.
+assertEquals('object', TypeOfThis.apply('xxx', []));
+assertEquals('object', TypeOfThis.apply(true, []));
+assertEquals('object', TypeOfThis.apply(false, []));
+assertEquals('object', TypeOfThis.apply(42, []));
+assertEquals('object', TypeOfThis.apply(3.14, []));
+
+// According to ES3 15.3.4.3 the this value passed to Function.prototype.call
+// should be wrapped. According to ES5 it should not.
+assertEquals('object', TypeOfThis.call('xxx'));
+assertEquals('object', TypeOfThis.call(true));
+assertEquals('object', TypeOfThis.call(false));
+assertEquals('object', TypeOfThis.call(42));
+assertEquals('object', TypeOfThis.call(3.14));
diff --git a/deps/v8/tools/codemap.js b/deps/v8/tools/codemap.js
index af511f642..8eb2acbc2 100644
--- a/deps/v8/tools/codemap.js
+++ b/deps/v8/tools/codemap.js
@@ -196,6 +196,18 @@ devtools.profiler.CodeMap.prototype.findEntry = function(addr) {
/**
+ * Returns a dynamic code entry using its starting address.
+ *
+ * @param {number} addr Address.
+ */
+devtools.profiler.CodeMap.prototype.findDynamicEntryByStartAddress =
+ function(addr) {
+ var node = this.dynamics_.find(addr);
+ return node ? node.value : null;
+};
+
+
+/**
* Returns an array of all dynamic code entries.
*/
devtools.profiler.CodeMap.prototype.getAllDynamicEntries = function() {
diff --git a/deps/v8/tools/csvparser.js b/deps/v8/tools/csvparser.js
index 9e58deaea..6e101e206 100644
--- a/deps/v8/tools/csvparser.js
+++ b/deps/v8/tools/csvparser.js
@@ -39,17 +39,17 @@ devtools.profiler.CsvParser = function() {
/**
- * A regex for matching a trailing quote.
+ * A regex for matching a CSV field.
* @private
*/
-devtools.profiler.CsvParser.TRAILING_QUOTE_RE_ = /\"$/;
+devtools.profiler.CsvParser.CSV_FIELD_RE_ = /^"((?:[^"]|"")*)"|([^,]*)/;
/**
* A regex for matching a double quote.
* @private
*/
-devtools.profiler.CsvParser.DOUBLE_QUOTE_RE_ = /\"\"/g;
+devtools.profiler.CsvParser.DOUBLE_QUOTE_RE_ = /""/g;
/**
@@ -58,41 +58,26 @@ devtools.profiler.CsvParser.DOUBLE_QUOTE_RE_ = /\"\"/g;
* @param {string} line Input line.
*/
devtools.profiler.CsvParser.prototype.parseLine = function(line) {
- var insideQuotes = false;
+ var fieldRe = devtools.profiler.CsvParser.CSV_FIELD_RE_;
+ var doubleQuoteRe = devtools.profiler.CsvParser.DOUBLE_QUOTE_RE_;
+ var pos = 0;
+ var endPos = line.length;
var fields = [];
- var prevPos = 0;
- for (var i = 0, n = line.length; i < n; ++i) {
- switch (line.charAt(i)) {
- case ',':
- if (!insideQuotes) {
- fields.push(line.substring(prevPos, i));
- prevPos = i + 1;
- }
- break;
- case '"':
- if (!insideQuotes) {
- insideQuotes = true;
- // Skip the leading quote.
- prevPos++;
- } else {
- if (i + 1 < n && line.charAt(i + 1) != '"') {
- insideQuotes = false;
- } else {
- i++;
- }
- }
- break;
- }
- }
- if (n > 0) {
- fields.push(line.substring(prevPos));
- }
-
- for (i = 0; i < fields.length; ++i) {
- // Eliminate trailing quotes.
- fields[i] = fields[i].replace(devtools.profiler.CsvParser.TRAILING_QUOTE_RE_, '');
- // Convert quoted quotes into single ones.
- fields[i] = fields[i].replace(devtools.profiler.CsvParser.DOUBLE_QUOTE_RE_, '"');
+ if (endPos > 0) {
+ do {
+ var fieldMatch = fieldRe.exec(line.substr(pos));
+ if (typeof fieldMatch[1] === "string") {
+ var field = fieldMatch[1];
+ pos += field.length + 3; // Skip comma and quotes.
+ fields.push(field.replace(doubleQuoteRe, '"'));
+ } else {
+ // The second field pattern will match anything, thus
+ // in the worst case the match will be an empty string.
+ var field = fieldMatch[2];
+ pos += field.length + 1; // Skip comma.
+ fields.push(field);
+ }
+ } while (pos <= endPos);
}
return fields;
};
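
The rewritten parser consumes one field per regexp application: the first alternative matches a fully quoted field (with "" as an escaped quote), the second matches everything up to the next comma. A usage sketch mirroring the new test case above:

var parser = new devtools.profiler.CsvParser();
// Doubled quotes inside a quoted field collapse to single quotes:
assertEquals(['a', 'say "hi"', 'b'], parser.parseLine('a,"say ""hi""",b'));
// An empty quoted field parses to the empty string:
assertEquals(['x', ''], parser.parseLine('x,""'));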
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index 4368eb81b..f2d1b98ee 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -252,6 +252,8 @@
'../../src/counters.cc',
'../../src/counters.h',
'../../src/cpu.h',
+ '../../src/data-flow.cc',
+ '../../src/data-flow.h',
'../../src/dateparser.cc',
'../../src/dateparser.h',
'../../src/dateparser-inl.h',
@@ -277,6 +279,8 @@
'../../src/frames-inl.h',
'../../src/frames.cc',
'../../src/frames.h',
+ '../../src/full-codegen.cc',
+ '../../src/full-codegen.h',
'../../src/func-name-inferrer.cc',
'../../src/func-name-inferrer.h',
'../../src/global-handles.cc',
@@ -411,6 +415,7 @@
'../../src/arm/fast-codegen-arm.cc',
'../../src/arm/frames-arm.cc',
'../../src/arm/frames-arm.h',
+ '../../src/arm/full-codegen-arm.cc',
'../../src/arm/ic-arm.cc',
'../../src/arm/jump-target-arm.cc',
'../../src/arm/macro-assembler-arm.cc',
@@ -449,6 +454,7 @@
'../../src/ia32/fast-codegen-ia32.cc',
'../../src/ia32/frames-ia32.cc',
'../../src/ia32/frames-ia32.h',
+ '../../src/ia32/full-codegen-ia32.cc',
'../../src/ia32/ic-ia32.cc',
'../../src/ia32/jump-target-ia32.cc',
'../../src/ia32/macro-assembler-ia32.cc',
@@ -478,6 +484,7 @@
'../../src/x64/fast-codegen-x64.cc',
'../../src/x64/frames-x64.cc',
'../../src/x64/frames-x64.h',
+ '../../src/x64/full-codegen-x64.cc',
'../../src/x64/ic-x64.cc',
'../../src/x64/jump-target-x64.cc',
'../../src/x64/macro-assembler-x64.cc',
diff --git a/deps/v8/tools/logreader.js b/deps/v8/tools/logreader.js
index 88ab90774..20a1f5444 100644
--- a/deps/v8/tools/logreader.js
+++ b/deps/v8/tools/logreader.js
@@ -139,11 +139,12 @@ devtools.profiler.LogReader.prototype.processLogChunk = function(chunk) {
* Processes stack record.
*
* @param {number} pc Program counter.
+ * @param {number} func Address of the JS function object, or 0 if absent.
* @param {Array.<string>} stack String representation of a stack.
* @return {Array.<number>} Processed stack.
*/
-devtools.profiler.LogReader.prototype.processStack = function(pc, stack) {
- var fullStack = [pc];
+devtools.profiler.LogReader.prototype.processStack = function(pc, func, stack) {
+ var fullStack = func ? [pc, func] : [pc];
var prevFrame = pc;
for (var i = 0, n = stack.length; i < n; ++i) {
var frame = stack[i];
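
Callers without a function address pass 0 and keep the old single-pc behavior, which is exactly what the updated logreader test does. In sketch form:

// processStack(pc, 0, frames)        -> [pc, ...resolved frames]   (old shape)
// processStack(pc, funcAddr, frames) -> [pc, funcAddr, ...frames]  (new shape)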
diff --git a/deps/v8/tools/profile.js b/deps/v8/tools/profile.js
index db4b542ff..b2de6490e 100644
--- a/deps/v8/tools/profile.js
+++ b/deps/v8/tools/profile.js
@@ -43,6 +43,11 @@ devtools.profiler.Profile = function() {
this.bottomUpTree_ = new devtools.profiler.CallTree();
};
+/**
+ * Version of profiler log.
+ */
+devtools.profiler.Profile.VERSION = 2;
+
/**
* Returns whether a function with the specified name must be skipped.
@@ -134,6 +139,21 @@ devtools.profiler.Profile.prototype.addCode = function(
/**
+ * Creates an alias entry for a code entry.
+ *
+ * @param {number} aliasAddr Alias address.
+ * @param {number} addr Code entry address.
+ */
+devtools.profiler.Profile.prototype.addCodeAlias = function(
+ aliasAddr, addr) {
+ var entry = this.codeMap_.findDynamicEntryByStartAddress(addr);
+ if (entry) {
+ this.codeMap_.addCode(aliasAddr, entry);
+ }
+};
+
+
+/**
* Reports about moving of a dynamic code entry.
*
* @param {number} from Current code entry address.
@@ -163,6 +183,41 @@ devtools.profiler.Profile.prototype.deleteCode = function(start) {
/**
+ * Reports about moving of a dynamic code entry.
+ *
+ * @param {number} from Current code entry address.
+ * @param {number} to New code entry address.
+ */
+devtools.profiler.Profile.prototype.safeMoveDynamicCode = function(from, to) {
+ if (this.codeMap_.findDynamicEntryByStartAddress(from)) {
+ this.codeMap_.moveCode(from, to);
+ }
+};
+
+
+/**
+ * Reports about deletion of a dynamic code entry.
+ *
+ * @param {number} start Starting address.
+ */
+devtools.profiler.Profile.prototype.safeDeleteDynamicCode = function(start) {
+ if (this.codeMap_.findDynamicEntryByStartAddress(start)) {
+ this.codeMap_.deleteCode(start);
+ }
+};
+
+
+/**
+ * Retrieves a code entry by an address.
+ *
+ * @param {number} addr Entry address.
+ */
+devtools.profiler.Profile.prototype.findEntry = function(addr) {
+ return this.codeMap_.findEntry(addr);
+};
+
+
+/**
* Records a tick event. Stack must contain a sequence of
* addresses starting with the program counter value.
*
@@ -345,6 +400,21 @@ devtools.profiler.Profile.DynamicCodeEntry.prototype.getName = function() {
/**
+ * Returns raw node name (without type decoration).
+ */
+devtools.profiler.Profile.DynamicCodeEntry.prototype.getRawName = function() {
+ return this.name;
+};
+
+
+devtools.profiler.Profile.DynamicCodeEntry.prototype.isJSFunction = function() {
+ return this.type == "Function" ||
+ this.type == "LazyCompile" ||
+ this.type == "Script";
+};
+
+
+/**
* Constructs a call graph.
*
* @constructor
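
addCodeAlias backs the new function-creation log event: the function object's address becomes a second key for the code entry already registered at addr, so ticks expressed in terms of the function object resolve to the same named entry. A minimal sketch, with hypothetical addresses and names:

var profile = new devtools.profiler.Profile();
profile.addCode('LazyCompile', 'f', 0x1000, 0x100);  // code object entry
profile.addCodeAlias(0x2000, 0x1000);                // function object alias
// Both addresses now resolve to the same underlying entry:
assertEquals(profile.findEntry(0x1000).getRawName(),
             profile.findEntry(0x2000).getRawName());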
diff --git a/deps/v8/tools/test.py b/deps/v8/tools/test.py
index 75b4f61f7..f17e9b1c3 100755
--- a/deps/v8/tools/test.py
+++ b/deps/v8/tools/test.py
@@ -639,10 +639,7 @@ class Context(object):
name = name + '.exe'
return name
-def RunTestCases(all_cases, progress, tasks):
- def DoSkip(case):
- return SKIP in c.outcomes or SLOW in c.outcomes
- cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
+def RunTestCases(cases_to_run, progress, tasks):
progress = PROGRESS_INDICATORS[progress](cases_to_run)
return progress.Run(tasks)
@@ -1335,13 +1332,16 @@ def Main():
PrintReport(all_cases)
result = None
- if len(all_cases) == 0:
+ def DoSkip(case):
+ return SKIP in case.outcomes or SLOW in case.outcomes
+ cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
+ if len(cases_to_run) == 0:
print "No tests to run."
return 0
else:
try:
start = time.time()
- if RunTestCases(all_cases, options.progress, options.j):
+ if RunTestCases(cases_to_run, options.progress, options.j):
result = 0
else:
result = 1
@@ -1355,7 +1355,7 @@ def Main():
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
- timed_tests = [ t.case for t in all_cases if not t.case.duration is None ]
+ timed_tests = [ t.case for t in cases_to_run if not t.case.duration is None ]
timed_tests.sort(lambda a, b: a.CompareTime(b))
index = 1
for entry in timed_tests[:20]:
diff --git a/deps/v8/tools/tickprocessor-driver.js b/deps/v8/tools/tickprocessor-driver.js
index dc6779607..4201e43d3 100644
--- a/deps/v8/tools/tickprocessor-driver.js
+++ b/deps/v8/tools/tickprocessor-driver.js
@@ -44,10 +44,16 @@ var entriesProviders = {
};
var params = processArguments(arguments);
+var snapshotLogProcessor;
+if (params.snapshotLogFileName) {
+ snapshotLogProcessor = new SnapshotLogProcessor();
+ snapshotLogProcessor.processLogFile(params.snapshotLogFileName);
+}
var tickProcessor = new TickProcessor(
new (entriesProviders[params.platform])(params.nm),
params.separateIc,
params.ignoreUnknown,
- params.stateFilter);
+ params.stateFilter,
+ snapshotLogProcessor);
tickProcessor.processLogFile(params.logFileName);
tickProcessor.printStatistics();
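
Together with the SnapshotLogProcessor below, this lets the tick processor recover names for code that was serialized into a snapshot and later relocated. A hypothetical invocation, assuming a shell wrapper such as linux-tick-processor that forwards its arguments to this driver:

  linux-tick-processor --snapshot-log=snapshot.log v8.log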
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index fd23987d9..35422e2ec 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -53,14 +53,79 @@ function readFile(fileName) {
function inherits(childCtor, parentCtor) {
- function tempCtor() {};
- tempCtor.prototype = parentCtor.prototype;
- childCtor.prototype = new tempCtor();
+ childCtor.prototype.__proto__ = parentCtor.prototype;
+};
+
+
+function SnapshotLogProcessor() {
+ devtools.profiler.LogReader.call(this, {
+ 'code-creation': {
+ parsers: [null, this.createAddressParser('code'), parseInt, null],
+ processor: this.processCodeCreation, backrefs: true },
+ 'code-move': { parsers: [this.createAddressParser('code'),
+ this.createAddressParser('code-move-to')],
+ processor: this.processCodeMove, backrefs: true },
+ 'code-delete': { parsers: [this.createAddressParser('code')],
+ processor: this.processCodeDelete, backrefs: true },
+ 'snapshot-pos': { parsers: [this.createAddressParser('code'), parseInt],
+ processor: this.processSnapshotPosition, backrefs: true }});
+
+ Profile.prototype.handleUnknownCode = function(operation, addr) {
+ var op = devtools.profiler.Profile.Operation;
+ switch (operation) {
+ case op.MOVE:
+ print('Snapshot: Code move event for unknown code: 0x' +
+ addr.toString(16));
+ break;
+ case op.DELETE:
+ print('Snapshot: Code delete event for unknown code: 0x' +
+ addr.toString(16));
+ break;
+ }
+ };
+
+ this.profile_ = new Profile();
+ this.serializedEntries_ = [];
+}
+inherits(SnapshotLogProcessor, devtools.profiler.LogReader);
+
+
+SnapshotLogProcessor.prototype.processCodeCreation = function(
+ type, start, size, name) {
+ var entry = this.profile_.addCode(
+ this.expandAlias(type), name, start, size);
+};
+
+
+SnapshotLogProcessor.prototype.processCodeMove = function(from, to) {
+ this.profile_.moveCode(from, to);
+};
+
+
+SnapshotLogProcessor.prototype.processCodeDelete = function(start) {
+ this.profile_.deleteCode(start);
+};
+
+
+SnapshotLogProcessor.prototype.processSnapshotPosition = function(addr, pos) {
+ this.serializedEntries_[pos] = this.profile_.findEntry(addr);
+};
+
+
+SnapshotLogProcessor.prototype.processLogFile = function(fileName) {
+ var contents = readFile(fileName);
+ this.processLogChunk(contents);
+};
+
+
+SnapshotLogProcessor.prototype.getSerializedEntryName = function(pos) {
+ var entry = this.serializedEntries_[pos];
+ return entry ? entry.getRawName() : null;
};
function TickProcessor(
- cppEntriesProvider, separateIc, ignoreUnknown, stateFilter) {
+ cppEntriesProvider, separateIc, ignoreUnknown, stateFilter, snapshotLogProcessor) {
devtools.profiler.LogReader.call(this, {
'shared-library': { parsers: [null, parseInt, parseInt],
processor: this.processSharedLibrary },
@@ -72,8 +137,19 @@ function TickProcessor(
processor: this.processCodeMove, backrefs: true },
'code-delete': { parsers: [this.createAddressParser('code')],
processor: this.processCodeDelete, backrefs: true },
+ 'function-creation': { parsers: [this.createAddressParser('code'),
+ this.createAddressParser('function-obj')],
+ processor: this.processFunctionCreation, backrefs: true },
+ 'function-move': { parsers: [this.createAddressParser('code'),
+ this.createAddressParser('code-move-to')],
+ processor: this.processFunctionMove, backrefs: true },
+ 'function-delete': { parsers: [this.createAddressParser('code')],
+ processor: this.processFunctionDelete, backrefs: true },
+ 'snapshot-pos': { parsers: [this.createAddressParser('code'), parseInt],
+ processor: this.processSnapshotPosition, backrefs: true },
'tick': { parsers: [this.createAddressParser('code'),
- this.createAddressParser('stack'), parseInt, 'var-args'],
+ this.createAddressParser('stack'),
+ this.createAddressParser('func'), parseInt, 'var-args'],
processor: this.processTick, backrefs: true },
'heap-sample-begin': { parsers: [null, null, parseInt],
processor: this.processHeapSampleBegin },
@@ -95,6 +171,8 @@ function TickProcessor(
this.cppEntriesProvider_ = cppEntriesProvider;
this.ignoreUnknown_ = ignoreUnknown;
this.stateFilter_ = stateFilter;
+ this.snapshotLogProcessor_ = snapshotLogProcessor;
+ this.deserializedEntriesNames_ = [];
var ticks = this.ticks_ =
{ total: 0, unaccounted: 0, excluded: 0, gc: 0 };
@@ -202,6 +280,7 @@ TickProcessor.prototype.processSharedLibrary = function(
TickProcessor.prototype.processCodeCreation = function(
type, start, size, name) {
+ name = this.deserializedEntriesNames_[start] || name;
var entry = this.profile_.addCode(
this.expandAlias(type), name, start, size);
};
@@ -217,12 +296,36 @@ TickProcessor.prototype.processCodeDelete = function(start) {
};
+TickProcessor.prototype.processFunctionCreation = function(
+ functionAddr, codeAddr) {
+ this.profile_.addCodeAlias(functionAddr, codeAddr);
+};
+
+
+TickProcessor.prototype.processFunctionMove = function(from, to) {
+ this.profile_.safeMoveDynamicCode(from, to);
+};
+
+
+TickProcessor.prototype.processFunctionDelete = function(start) {
+ this.profile_.safeDeleteDynamicCode(start);
+};
+
+
+TickProcessor.prototype.processSnapshotPosition = function(addr, pos) {
+ if (this.snapshotLogProcessor_) {
+ this.deserializedEntriesNames_[addr] =
+ this.snapshotLogProcessor_.getSerializedEntryName(pos);
+ }
+};
+
+
TickProcessor.prototype.includeTick = function(vmState) {
return this.stateFilter_ == null || this.stateFilter_ == vmState;
};
-TickProcessor.prototype.processTick = function(pc, sp, vmState, stack) {
+TickProcessor.prototype.processTick = function(pc, sp, func, vmState, stack) {
this.ticks_.total++;
if (vmState == TickProcessor.VmStates.GC) this.ticks_.gc++;
if (!this.includeTick(vmState)) {
@@ -230,7 +333,19 @@ TickProcessor.prototype.processTick = function(pc, sp, vmState, stack) {
return;
}
- this.profile_.recordTick(this.processStack(pc, stack));
+ if (func) {
+ var funcEntry = this.profile_.findEntry(func);
+ if (!funcEntry || !funcEntry.isJSFunction || !funcEntry.isJSFunction()) {
+ func = 0;
+ } else {
+ var currEntry = this.profile_.findEntry(pc);
+ if (!currEntry || !currEntry.isJSFunction || currEntry.isJSFunction()) {
+ func = 0;
+ }
+ }
+ }
+
+ this.profile_.recordTick(this.processStack(pc, func, stack));
};
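
The guard keeps func only when it resolves to a JS function entry while pc resolves to non-JS code (a stub or IC); if pc already falls inside a JS function, that entry is the attribution and keeping func would double-count. A condensed restatement of the condition under which func survives:

// func is kept iff it names a JS function and pc sits in non-JS code:
var keep = funcEntry && funcEntry.isJSFunction && funcEntry.isJSFunction() &&
           currEntry && currEntry.isJSFunction && !currEntry.isJSFunction();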
@@ -263,7 +378,7 @@ TickProcessor.prototype.processJSProducer = function(constructor, stack) {
if (stack.length == 0) return;
var first = stack.shift();
var processedStack =
- this.profile_.resolveAndFilterFuncs_(this.processStack(first, stack));
+ this.profile_.resolveAndFilterFuncs_(this.processStack(first, 0, stack));
processedStack.unshift(constructor);
this.currentProducerProfile_.addPath(processedStack);
};
@@ -648,7 +763,9 @@ function ArgumentsProcessor(args) {
'--mac': ['platform', 'mac',
'Specify that we are running on Mac OS X platform'],
'--nm': ['nm', 'nm',
- 'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)']
+ 'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)'],
+ '--snapshot-log': ['snapshotLogFileName', 'snapshot.log',
+ 'Specify snapshot log file to use (e.g. --snapshot-log=snapshot.log)']
};
this.argsDispatch_['--js'] = this.argsDispatch_['-j'];
this.argsDispatch_['--gc'] = this.argsDispatch_['-g'];
@@ -660,6 +777,7 @@ function ArgumentsProcessor(args) {
ArgumentsProcessor.DEFAULTS = {
logFileName: 'v8.log',
+ snapshotLogFileName: null,
platform: 'unix',
stateFilter: null,
ignoreUnknown: false,
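With the defaults above, snapshot processing stays off unless --snapshot-log is passed explicitly. Assuming the usual platform driver script (the exact script name varies; this invocation is only illustrative), a snapshot-aware run would look like:

    tools/linux-tick-processor --snapshot-log=snapshot.log v8.log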
diff --git a/deps/v8/tools/tickprocessor.py b/deps/v8/tools/tickprocessor.py
index cc540d3db..c932e3fc4 100644
--- a/deps/v8/tools/tickprocessor.py
+++ b/deps/v8/tools/tickprocessor.py
@@ -59,6 +59,8 @@ class CodeEntry(object):
def IsICEntry(self):
return False
+ def IsJSFunction(self):
+ return False
class SharedLibraryEntry(CodeEntry):
@@ -124,6 +126,8 @@ class JSCodeEntry(CodeEntry):
return self.type in ('CallIC', 'LoadIC', 'StoreIC') or \
(self.type == 'Builtin' and self.builtin_ic_re.match(self.name))
+ def IsJSFunction(self):
+ return self.type in ('Function', 'LazyCompile', 'Script')
class CodeRegion(object):
@@ -212,13 +216,19 @@ class TickProcessor(object):
for row in logreader:
row_num += 1
if row[0] == 'tick':
- self.ProcessTick(int(row[1], 16), int(row[2], 16), int(row[3]), self.PreprocessStack(row[4:]))
+ self.ProcessTick(int(row[1], 16), int(row[2], 16), int(row[3], 16), int(row[4]), self.PreprocessStack(row[5:]))
elif row[0] == 'code-creation':
self.ProcessCodeCreation(row[1], int(row[2], 16), int(row[3]), row[4])
elif row[0] == 'code-move':
self.ProcessCodeMove(int(row[1], 16), int(row[2], 16))
elif row[0] == 'code-delete':
self.ProcessCodeDelete(int(row[1], 16))
+ elif row[0] == 'function-creation':
+ self.ProcessFunctionCreation(int(row[1], 16), int(row[2], 16))
+ elif row[0] == 'function-move':
+ self.ProcessFunctionMove(int(row[1], 16), int(row[2], 16))
+ elif row[0] == 'function-delete':
+ self.ProcessFunctionDelete(int(row[1], 16))
elif row[0] == 'shared-library':
self.AddSharedLibraryEntry(row[1], int(row[2], 16), int(row[3], 16))
self.ParseVMSymbols(row[1], int(row[2], 16), int(row[3], 16))
@@ -275,6 +285,27 @@ class TickProcessor(object):
except splaytree.KeyNotFoundError:
print('Code delete event for unknown code: 0x%x' % from_addr)
+ def ProcessFunctionCreation(self, func_addr, code_addr):
+ js_entry_node = self.js_entries.Find(code_addr)
+ if js_entry_node:
+ js_entry = js_entry_node.value
+ self.js_entries.Insert(func_addr, JSCodeEntry(func_addr, js_entry.name, js_entry.type, 1, None))
+
+ def ProcessFunctionMove(self, from_addr, to_addr):
+ try:
+ removed_node = self.js_entries.Remove(from_addr)
+ removed_node.value.SetStartAddress(to_addr)
+ self.js_entries.Insert(to_addr, removed_node.value)
+ except splaytree.KeyNotFoundError:
+ return
+
+ def ProcessFunctionDelete(self, from_addr):
+ try:
+ removed_node = self.js_entries.Remove(from_addr)
+ self.deleted_code.append(removed_node.value)
+ except splaytree.KeyNotFoundError:
+ return
+
def ProcessBeginCodeRegion(self, id, assm, start, name):
if not assm in self.pending_assemblers:
self.pending_assemblers[assm] = Assembler()
@@ -320,7 +351,7 @@ class TickProcessor(object):
result.append(entry.ToString())
return result
- def ProcessTick(self, pc, sp, state, stack):
+ def ProcessTick(self, pc, sp, func, state, stack):
if state == VMStates['GC']:
self.number_of_gc_ticks += 1
if not self.IncludeTick(pc, sp, state):
@@ -337,11 +368,16 @@ class TickProcessor(object):
if len(stack) > 0:
caller_pc = stack.pop(0)
self.total_number_of_ticks -= 1
- self.ProcessTick(caller_pc, sp, state, stack)
+ self.ProcessTick(caller_pc, sp, func, state, stack)
else:
self.unaccounted_number_of_ticks += 1
else:
- entry.Tick(pc, self.ProcessStack(stack))
+ processed_stack = self.ProcessStack(stack)
+ if not entry.IsSharedLibraryEntry() and not entry.IsJSFunction():
+ func_entry_node = self.js_entries.Find(func)
+ if func_entry_node and func_entry_node.value.IsJSFunction():
+ processed_stack.insert(0, func_entry_node.value.ToString())
+ entry.Tick(pc, processed_stack)
if self.call_graph_json:
self.AddToPackedStacks(pc, stack)
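The Python port applies the same rule as the JavaScript processor, but by rewriting the stack rather than recording an alias tick. A JavaScript rendering of that stack-prepending variant (names assumed, mirroring the Python logic above):

    // When the sampled pc resolves to a non-JS-function entry (e.g. a
    // stub) and `func` resolves to a JS function, credit that function
    // by prepending it to the processed stack.
    function attributeTick(profile, pc, func, processedStack) {
      var entry = profile.findEntry(pc);
      var funcEntry = profile.findEntry(func);
      if (entry && !entry.isJSFunction() &&
          funcEntry && funcEntry.isJSFunction()) {
        processedStack.unshift(funcEntry.name);
      }
      return processedStack;
    }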
diff --git a/deps/v8/tools/visual_studio/common.vsprops b/deps/v8/tools/visual_studio/common.vsprops
index 213a0816a..e4f75a509 100644
--- a/deps/v8/tools/visual_studio/common.vsprops
+++ b/deps/v8/tools/visual_studio/common.vsprops
@@ -28,7 +28,6 @@
GenerateDebugInformation="true"
MapFileName="$(OutDir)\$(TargetName).map"
ImportLibrary="$(OutDir)\lib\$(TargetName).lib"
- TargetMachine="1"
FixedBaseAddress="1"
AdditionalOptions="/IGNORE:4221 /NXCOMPAT"
/>
diff --git a/deps/v8/tools/visual_studio/d8.vcproj b/deps/v8/tools/visual_studio/d8.vcproj
index 21636ba35..8372c6760 100644
--- a/deps/v8/tools/visual_studio/d8.vcproj
+++ b/deps/v8/tools/visual_studio/d8.vcproj
@@ -70,9 +70,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
@@ -131,9 +128,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
diff --git a/deps/v8/tools/visual_studio/d8_arm.vcproj b/deps/v8/tools/visual_studio/d8_arm.vcproj
index fbebdb35c..66adcec3d 100644
--- a/deps/v8/tools/visual_studio/d8_arm.vcproj
+++ b/deps/v8/tools/visual_studio/d8_arm.vcproj
@@ -1,199 +1,193 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
- ProjectType="Visual C++"
- Version="8.00"
- Name="d8"
- ProjectGUID="{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}"
- RootNamespace="d8"
- Keyword="Win32Proj"
- >
- <Platforms>
- <Platform
- Name="Win32"
- />
- </Platforms>
- <ToolFiles>
- </ToolFiles>
- <Configurations>
- <Configuration
- Name="Debug|Win32"
- ConfigurationType="1"
- InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\debug.vsprops"
- >
- <Tool
- Name="VCPreBuildEventTool"
- />
- <Tool
- Name="VCCustomBuildTool"
- />
- <Tool
- Name="VCXMLDataGeneratorTool"
- />
- <Tool
- Name="VCWebServiceProxyGeneratorTool"
- />
- <Tool
- Name="VCMIDLTool"
- />
- <Tool
- Name="VCCLCompilerTool"
- />
- <Tool
- Name="VCManagedResourceCompilerTool"
- />
- <Tool
- Name="VCResourceCompilerTool"
- />
- <Tool
- Name="VCPreLinkEventTool"
- />
- <Tool
- Name="VCLinkerTool"
- AdditionalDependencies="winmm.lib Ws2_32.lib"
- />
- <Tool
- Name="VCALinkTool"
- />
- <Tool
- Name="VCManifestTool"
- />
- <Tool
- Name="VCXDCMakeTool"
- />
- <Tool
- Name="VCBscMakeTool"
- />
- <Tool
- Name="VCFxCopTool"
- />
- <Tool
- Name="VCAppVerifierTool"
- />
- <Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
- Name="VCPostBuildEventTool"
- />
- </Configuration>
- <Configuration
- Name="Release|Win32"
- ConfigurationType="1"
- InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\release.vsprops"
- >
- <Tool
- Name="VCPreBuildEventTool"
- />
- <Tool
- Name="VCCustomBuildTool"
- />
- <Tool
- Name="VCXMLDataGeneratorTool"
- />
- <Tool
- Name="VCWebServiceProxyGeneratorTool"
- />
- <Tool
- Name="VCMIDLTool"
- />
- <Tool
- Name="VCCLCompilerTool"
- />
- <Tool
- Name="VCManagedResourceCompilerTool"
- />
- <Tool
- Name="VCResourceCompilerTool"
- />
- <Tool
- Name="VCPreLinkEventTool"
- />
- <Tool
- Name="VCLinkerTool"
- AdditionalDependencies="winmm.lib Ws2_32.lib"
- />
- <Tool
- Name="VCALinkTool"
- />
- <Tool
- Name="VCManifestTool"
- />
- <Tool
- Name="VCXDCMakeTool"
- />
- <Tool
- Name="VCBscMakeTool"
- />
- <Tool
- Name="VCFxCopTool"
- />
- <Tool
- Name="VCAppVerifierTool"
- />
- <Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
- Name="VCPostBuildEventTool"
- />
- </Configuration>
- </Configurations>
- <References>
- </References>
- <Files>
- <File
- RelativePath="..\..\src\d8.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\d8.h"
- >
- </File>
- <File
- RelativePath="..\..\src\d8-debug.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\d8-debug.h"
- >
- </File>
- <File
- RelativePath="..\..\src\d8-windows.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\d8.js"
- >
- <FileConfiguration
- Name="Debug|Win32"
- >
- <Tool
- Name="VCCustomBuildTool"
- Description="Processing js files..."
- CommandLine=".\d8js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
- Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
- />
- </FileConfiguration>
- <FileConfiguration
- Name="Release|Win32"
- >
- <Tool
- Name="VCCustomBuildTool"
- Description="Processing js files..."
- CommandLine=".\d8js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
- Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
- />
- </FileConfiguration>
- </File>
- <Filter
- Name="generated files"
- >
- <File
- RelativePath="$(IntDir)\DerivedSources\natives.cc"
- >
- </File>
- </Filter>
- </Files>
- <Globals>
- </Globals>
-</VisualStudioProject>
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+ ProjectType="Visual C++"
+ Version="8.00"
+ Name="d8"
+ ProjectGUID="{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}"
+ RootNamespace="d8"
+ Keyword="Win32Proj"
+ >
+ <Platforms>
+ <Platform
+ Name="Win32"
+ />
+ </Platforms>
+ <ToolFiles>
+ </ToolFiles>
+ <Configurations>
+ <Configuration
+ Name="Debug|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\debug.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ AdditionalDependencies="winmm.lib Ws2_32.lib"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ <Configuration
+ Name="Release|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\release.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ AdditionalDependencies="winmm.lib Ws2_32.lib"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ </Configurations>
+ <References>
+ </References>
+ <Files>
+ <File
+ RelativePath="..\..\src\d8.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\d8.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\d8-debug.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\d8-debug.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\d8-windows.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\d8.js"
+ >
+ <FileConfiguration
+ Name="Debug|Win32"
+ >
+ <Tool
+ Name="VCCustomBuildTool"
+ Description="Processing js files..."
+ CommandLine=".\d8js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
+ Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
+ />
+ </FileConfiguration>
+ <FileConfiguration
+ Name="Release|Win32"
+ >
+ <Tool
+ Name="VCCustomBuildTool"
+ Description="Processing js files..."
+ CommandLine=".\d8js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
+ Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
+ />
+ </FileConfiguration>
+ </File>
+ <Filter
+ Name="generated files"
+ >
+ <File
+ RelativePath="$(IntDir)\DerivedSources\natives.cc"
+ >
+ </File>
+ </Filter>
+ </Files>
+ <Globals>
+ </Globals>
+</VisualStudioProject>
diff --git a/deps/v8/tools/visual_studio/d8_x64.vcproj b/deps/v8/tools/visual_studio/d8_x64.vcproj
index 5c47a8ac8..b534a923e 100644
--- a/deps/v8/tools/visual_studio/d8_x64.vcproj
+++ b/deps/v8/tools/visual_studio/d8_x64.vcproj
@@ -50,7 +50,6 @@
<Tool
Name="VCLinkerTool"
AdditionalDependencies="winmm.lib Ws2_32.lib"
- TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
@@ -71,9 +70,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
@@ -112,7 +108,6 @@
<Tool
Name="VCLinkerTool"
AdditionalDependencies="winmm.lib Ws2_32.lib"
- TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
@@ -133,9 +128,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
@@ -146,6 +138,22 @@
<File
RelativePath="..\..\src\d8.cc"
>
+ <FileConfiguration
+ Name="Debug|x64"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ DisableSpecificWarnings="4267"
+ />
+ </FileConfiguration>
+ <FileConfiguration
+ Name="Release|x64"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ DisableSpecificWarnings="4267"
+ />
+ </FileConfiguration>
</File>
<File
RelativePath="..\..\src\d8.h"
diff --git a/deps/v8/tools/visual_studio/ia32.vsprops b/deps/v8/tools/visual_studio/ia32.vsprops
index 0399bbbe6..a12f13e74 100644
--- a/deps/v8/tools/visual_studio/ia32.vsprops
+++ b/deps/v8/tools/visual_studio/ia32.vsprops
@@ -10,4 +10,8 @@
Name="VCCLCompilerTool"
PreprocessorDefinitions="_USE_32BIT_TIME_T;V8_TARGET_ARCH_IA32;V8_NATIVE_REGEXP"
/>
+ <Tool
+ Name="VCLinkerTool"
+ TargetMachine="1"
+ />
</VisualStudioPropertySheet>
diff --git a/deps/v8/tools/visual_studio/v8_arm.vcproj b/deps/v8/tools/visual_studio/v8_arm.vcproj
index f8cbcc4c2..d21affe9d 100644
--- a/deps/v8/tools/visual_studio/v8_arm.vcproj
+++ b/deps/v8/tools/visual_studio/v8_arm.vcproj
@@ -1,223 +1,223 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
- ProjectType="Visual C++"
- Version="8.00"
- Name="v8"
- ProjectGUID="{21E22961-22BF-4493-BD3A-868F93DA5179}"
- RootNamespace="v8"
- Keyword="Win32Proj"
- >
- <Platforms>
- <Platform
- Name="Win32"
- />
- </Platforms>
- <ToolFiles>
- </ToolFiles>
- <Configurations>
- <Configuration
- Name="Debug|Win32"
- ConfigurationType="4"
- InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\debug.vsprops"
- >
- <Tool
- Name="VCPreBuildEventTool"
- />
- <Tool
- Name="VCCustomBuildTool"
- />
- <Tool
- Name="VCXMLDataGeneratorTool"
- />
- <Tool
- Name="VCWebServiceProxyGeneratorTool"
- />
- <Tool
- Name="VCMIDLTool"
- />
- <Tool
- Name="VCCLCompilerTool"
- />
- <Tool
- Name="VCManagedResourceCompilerTool"
- />
- <Tool
- Name="VCResourceCompilerTool"
- />
- <Tool
- Name="VCPreLinkEventTool"
- />
- <Tool
- Name="VCLibrarianTool"
- LinkLibraryDependencies="true"
- />
- <Tool
- Name="VCALinkTool"
- />
- <Tool
- Name="VCXDCMakeTool"
- />
- <Tool
- Name="VCBscMakeTool"
- />
- <Tool
- Name="VCFxCopTool"
- />
- <Tool
- Name="VCPostBuildEventTool"
- />
- </Configuration>
- <Configuration
- Name="Release|Win32"
- ConfigurationType="4"
- InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\release.vsprops"
- >
- <Tool
- Name="VCPreBuildEventTool"
- />
- <Tool
- Name="VCCustomBuildTool"
- />
- <Tool
- Name="VCXMLDataGeneratorTool"
- />
- <Tool
- Name="VCWebServiceProxyGeneratorTool"
- />
- <Tool
- Name="VCMIDLTool"
- />
- <Tool
- Name="VCCLCompilerTool"
- />
- <Tool
- Name="VCManagedResourceCompilerTool"
- />
- <Tool
- Name="VCResourceCompilerTool"
- />
- <Tool
- Name="VCPreLinkEventTool"
- />
- <Tool
- Name="VCLibrarianTool"
- LinkLibraryDependencies="true"
- />
- <Tool
- Name="VCALinkTool"
- />
- <Tool
- Name="VCXDCMakeTool"
- />
- <Tool
- Name="VCBscMakeTool"
- />
- <Tool
- Name="VCFxCopTool"
- />
- <Tool
- Name="VCPostBuildEventTool"
- />
- </Configuration>
- </Configurations>
- <References>
- </References>
- <Files>
- <Filter
- Name="js"
- >
- <File
- RelativePath="..\..\src\apinatives.js"
- >
- </File>
- <File
- RelativePath="..\..\src\array.js"
- >
- </File>
- <File
- RelativePath="..\..\src\date-delay.js"
- >
- </File>
- <File
- RelativePath="..\..\src\debug-delay.js"
- >
- </File>
- <File
- RelativePath="..\..\src\macros.py"
- >
- </File>
- <File
- RelativePath="..\..\src\math.js"
- >
- </File>
- <File
- RelativePath="..\..\src\messages.js"
- >
- </File>
- <File
- RelativePath="..\..\src\mirror-delay.js"
- >
- </File>
- <File
- RelativePath="..\..\src\regexp-delay.js"
- >
- </File>
- <File
- RelativePath="..\..\src\json-delay.js"
- >
- </File>
- <File
- RelativePath="..\..\src\runtime.js"
- >
- </File>
- <File
- RelativePath="..\..\src\string.js"
- >
- </File>
- <File
- RelativePath="..\..\src\uri.js"
- >
- </File>
- <File
- RelativePath="..\..\src\v8natives.js"
- >
- <FileConfiguration
- Name="Debug|Win32"
- >
- <Tool
- Name="VCCustomBuildTool"
- Description="Processing js files..."
- CommandLine=".\js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
- AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js"
- Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
- />
- </FileConfiguration>
- <FileConfiguration
- Name="Release|Win32"
- >
- <Tool
- Name="VCCustomBuildTool"
- Description="Processing js files..."
- CommandLine=".\js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
- AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js"
- Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
- />
- </FileConfiguration>
- </File>
- </Filter>
- <Filter
- Name="generated files"
- >
- <File
- RelativePath="$(IntDir)\DerivedSources\natives.cc"
- >
- </File>
- </Filter>
- <File
- RelativePath="..\..\src\snapshot-empty.cc"
- >
- </File>
- </Files>
- <Globals>
- </Globals>
-</VisualStudioProject>
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+ ProjectType="Visual C++"
+ Version="8.00"
+ Name="v8"
+ ProjectGUID="{21E22961-22BF-4493-BD3A-868F93DA5179}"
+ RootNamespace="v8"
+ Keyword="Win32Proj"
+ >
+ <Platforms>
+ <Platform
+ Name="Win32"
+ />
+ </Platforms>
+ <ToolFiles>
+ </ToolFiles>
+ <Configurations>
+ <Configuration
+ Name="Debug|Win32"
+ ConfigurationType="4"
+ InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\debug.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLibrarianTool"
+ LinkLibraryDependencies="true"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ <Configuration
+ Name="Release|Win32"
+ ConfigurationType="4"
+ InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\release.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLibrarianTool"
+ LinkLibraryDependencies="true"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ </Configurations>
+ <References>
+ </References>
+ <Files>
+ <Filter
+ Name="js"
+ >
+ <File
+ RelativePath="..\..\src\apinatives.js"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\array.js"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\date-delay.js"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\debug-delay.js"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\macros.py"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\math.js"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\messages.js"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\mirror-delay.js"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\regexp-delay.js"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\json-delay.js"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\runtime.js"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\string.js"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\uri.js"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\v8natives.js"
+ >
+ <FileConfiguration
+ Name="Debug|Win32"
+ >
+ <Tool
+ Name="VCCustomBuildTool"
+ Description="Processing js files..."
+ CommandLine=".\js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
+ AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js"
+ Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
+ />
+ </FileConfiguration>
+ <FileConfiguration
+ Name="Release|Win32"
+ >
+ <Tool
+ Name="VCCustomBuildTool"
+ Description="Processing js files..."
+ CommandLine=".\js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
+ AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js"
+ Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
+ />
+ </FileConfiguration>
+ </File>
+ </Filter>
+ <Filter
+ Name="generated files"
+ >
+ <File
+ RelativePath="$(IntDir)\DerivedSources\natives.cc"
+ >
+ </File>
+ </Filter>
+ <File
+ RelativePath="..\..\src\snapshot-empty.cc"
+ >
+ </File>
+ </Files>
+ <Globals>
+ </Globals>
+</VisualStudioProject>
diff --git a/deps/v8/tools/visual_studio/v8_base.vcproj b/deps/v8/tools/visual_studio/v8_base.vcproj
index 6b4735975..e58e8ff31 100644
--- a/deps/v8/tools/visual_studio/v8_base.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base.vcproj
@@ -337,6 +337,14 @@
>
</File>
<File
+ RelativePath="..\..\src\data-flow.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\data-flow.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\dateparser.cc"
>
</File>
@@ -436,6 +444,18 @@
RelativePath="..\..\src\frames.h"
>
</File>
+ <File
+ RelativePath="..\..\src\ia32\full-codegen-ia32.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\full-codegen.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\full-codegen.h"
+ >
+ </File>
<File
RelativePath="..\..\src\func-name-inferrer.cc"
>
diff --git a/deps/v8/tools/visual_studio/v8_base_arm.vcproj b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
index afb4f74b7..4b37b538c 100644
--- a/deps/v8/tools/visual_studio/v8_base_arm.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
@@ -345,6 +345,14 @@
>
</File>
<File
+ RelativePath="..\..\src\data-flow.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\data-flow.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\dateparser.cc"
>
</File>
@@ -444,6 +452,18 @@
RelativePath="..\..\src\frames.h"
>
</File>
+ <File
+ RelativePath="..\..\src\arm\full-codegen-arm.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\full-codegen.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\full-codegen.h"
+ >
+ </File>
<File
RelativePath="..\..\src\func-name-inferrer.cc"
>
diff --git a/deps/v8/tools/visual_studio/v8_base_x64.vcproj b/deps/v8/tools/visual_studio/v8_base_x64.vcproj
index a8c8b55fa..b6d5c7d82 100644
--- a/deps/v8/tools/visual_studio/v8_base_x64.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_x64.vcproj
@@ -337,6 +337,14 @@
>
</File>
<File
+ RelativePath="..\..\src\data-flow.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\data-flow.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\dateparser.cc"
>
</File>
@@ -389,6 +397,18 @@
>
</File>
<File
+ RelativePath="..\..\src\x64\fast-codegen-x64.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\fast-codegen.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\fast-codegen.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\flags.cc"
>
</File>
@@ -425,6 +445,19 @@
>
</File>
<File
+ RelativePath="..\..\src\x64\full-codegen-x64.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\full-codegen.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\full-codegen.h"
+ >
+ </File>
+ <File
+
RelativePath="..\..\src\func-name-inferrer.cc"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_cctest.vcproj b/deps/v8/tools/visual_studio/v8_cctest.vcproj
index d1cf2e84c..9acb835c0 100644
--- a/deps/v8/tools/visual_studio/v8_cctest.vcproj
+++ b/deps/v8/tools/visual_studio/v8_cctest.vcproj
@@ -70,9 +70,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
@@ -131,9 +128,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
diff --git a/deps/v8/tools/visual_studio/v8_cctest_arm.vcproj b/deps/v8/tools/visual_studio/v8_cctest_arm.vcproj
index 968d13472..7ff953e24 100644
--- a/deps/v8/tools/visual_studio/v8_cctest_arm.vcproj
+++ b/deps/v8/tools/visual_studio/v8_cctest_arm.vcproj
@@ -70,9 +70,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
@@ -131,9 +128,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
diff --git a/deps/v8/tools/visual_studio/v8_cctest_x64.vcproj b/deps/v8/tools/visual_studio/v8_cctest_x64.vcproj
index 78db1a4aa..1e9044b1b 100644
--- a/deps/v8/tools/visual_studio/v8_cctest_x64.vcproj
+++ b/deps/v8/tools/visual_studio/v8_cctest_x64.vcproj
@@ -50,7 +50,6 @@
<Tool
Name="VCLinkerTool"
AdditionalDependencies="winmm.lib Ws2_32.lib"
- TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
@@ -71,9 +70,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
@@ -112,7 +108,6 @@
<Tool
Name="VCLinkerTool"
AdditionalDependencies="winmm.lib Ws2_32.lib"
- TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
@@ -133,9 +128,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
@@ -180,10 +172,6 @@
>
</File>
<File
- RelativePath="..\..\test\cctest\test-disasm-x64.cc"
- >
- </File>
- <File
RelativePath="..\..\test\cctest\test-flags.cc"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_mksnapshot.vcproj b/deps/v8/tools/visual_studio/v8_mksnapshot.vcproj
index 00950b069..cb9e0483b 100644
--- a/deps/v8/tools/visual_studio/v8_mksnapshot.vcproj
+++ b/deps/v8/tools/visual_studio/v8_mksnapshot.vcproj
@@ -70,9 +70,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
@@ -131,9 +128,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
diff --git a/deps/v8/tools/visual_studio/v8_mksnapshot_x64.vcproj b/deps/v8/tools/visual_studio/v8_mksnapshot_x64.vcproj
index 1c460e4db..e684af03b 100644
--- a/deps/v8/tools/visual_studio/v8_mksnapshot_x64.vcproj
+++ b/deps/v8/tools/visual_studio/v8_mksnapshot_x64.vcproj
@@ -70,9 +70,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
@@ -131,9 +128,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
diff --git a/deps/v8/tools/visual_studio/v8_process_sample.vcproj b/deps/v8/tools/visual_studio/v8_process_sample.vcproj
index d94966b33..dc3fb3a01 100644
--- a/deps/v8/tools/visual_studio/v8_process_sample.vcproj
+++ b/deps/v8/tools/visual_studio/v8_process_sample.vcproj
@@ -70,9 +70,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
@@ -131,9 +128,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
diff --git a/deps/v8/tools/visual_studio/v8_process_sample_arm.vcproj b/deps/v8/tools/visual_studio/v8_process_sample_arm.vcproj
index 7320231cf..2d63f69e9 100644
--- a/deps/v8/tools/visual_studio/v8_process_sample_arm.vcproj
+++ b/deps/v8/tools/visual_studio/v8_process_sample_arm.vcproj
@@ -1,151 +1,145 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
- ProjectType="Visual C++"
- Version="8.00"
- Name="v8_process_sample"
- ProjectGUID="{EF019874-D38A-40E3-B17C-DB5923F0A79C}"
- RootNamespace="v8_process_sample"
- Keyword="Win32Proj"
- >
- <Platforms>
- <Platform
- Name="Win32"
- />
- </Platforms>
- <ToolFiles>
- </ToolFiles>
- <Configurations>
- <Configuration
- Name="Debug|Win32"
- ConfigurationType="1"
- InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\debug.vsprops"
- >
- <Tool
- Name="VCPreBuildEventTool"
- />
- <Tool
- Name="VCCustomBuildTool"
- />
- <Tool
- Name="VCXMLDataGeneratorTool"
- />
- <Tool
- Name="VCWebServiceProxyGeneratorTool"
- />
- <Tool
- Name="VCMIDLTool"
- />
- <Tool
- Name="VCCLCompilerTool"
- />
- <Tool
- Name="VCManagedResourceCompilerTool"
- />
- <Tool
- Name="VCResourceCompilerTool"
- />
- <Tool
- Name="VCPreLinkEventTool"
- />
- <Tool
- Name="VCLinkerTool"
- AdditionalDependencies="winmm.lib Ws2_32.lib"
- />
- <Tool
- Name="VCALinkTool"
- />
- <Tool
- Name="VCManifestTool"
- />
- <Tool
- Name="VCXDCMakeTool"
- />
- <Tool
- Name="VCBscMakeTool"
- />
- <Tool
- Name="VCFxCopTool"
- />
- <Tool
- Name="VCAppVerifierTool"
- />
- <Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
- Name="VCPostBuildEventTool"
- />
- </Configuration>
- <Configuration
- Name="Release|Win32"
- ConfigurationType="1"
- InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\release.vsprops"
- >
- <Tool
- Name="VCPreBuildEventTool"
- />
- <Tool
- Name="VCCustomBuildTool"
- />
- <Tool
- Name="VCXMLDataGeneratorTool"
- />
- <Tool
- Name="VCWebServiceProxyGeneratorTool"
- />
- <Tool
- Name="VCMIDLTool"
- />
- <Tool
- Name="VCCLCompilerTool"
- />
- <Tool
- Name="VCManagedResourceCompilerTool"
- />
- <Tool
- Name="VCResourceCompilerTool"
- />
- <Tool
- Name="VCPreLinkEventTool"
- />
- <Tool
- Name="VCLinkerTool"
- AdditionalDependencies="winmm.lib Ws2_32.lib"
- />
- <Tool
- Name="VCALinkTool"
- />
- <Tool
- Name="VCManifestTool"
- />
- <Tool
- Name="VCXDCMakeTool"
- />
- <Tool
- Name="VCBscMakeTool"
- />
- <Tool
- Name="VCFxCopTool"
- />
- <Tool
- Name="VCAppVerifierTool"
- />
- <Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
- Name="VCPostBuildEventTool"
- />
- </Configuration>
- </Configurations>
- <References>
- </References>
- <Files>
- <File
- RelativePath="..\..\samples\process.cc"
- >
- </File>
- </Files>
- <Globals>
- </Globals>
-</VisualStudioProject>
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+ ProjectType="Visual C++"
+ Version="8.00"
+ Name="v8_process_sample"
+ ProjectGUID="{EF019874-D38A-40E3-B17C-DB5923F0A79C}"
+ RootNamespace="v8_process_sample"
+ Keyword="Win32Proj"
+ >
+ <Platforms>
+ <Platform
+ Name="Win32"
+ />
+ </Platforms>
+ <ToolFiles>
+ </ToolFiles>
+ <Configurations>
+ <Configuration
+ Name="Debug|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\debug.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ AdditionalDependencies="winmm.lib Ws2_32.lib"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ <Configuration
+ Name="Release|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\release.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ AdditionalDependencies="winmm.lib Ws2_32.lib"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ </Configurations>
+ <References>
+ </References>
+ <Files>
+ <File
+ RelativePath="..\..\samples\process.cc"
+ >
+ </File>
+ </Files>
+ <Globals>
+ </Globals>
+</VisualStudioProject>
diff --git a/deps/v8/tools/visual_studio/v8_process_sample_x64.vcproj b/deps/v8/tools/visual_studio/v8_process_sample_x64.vcproj
index 81adbe0fb..1d7f01aeb 100644
--- a/deps/v8/tools/visual_studio/v8_process_sample_x64.vcproj
+++ b/deps/v8/tools/visual_studio/v8_process_sample_x64.vcproj
@@ -70,9 +70,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
@@ -131,9 +128,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
@@ -144,6 +138,22 @@
<File
RelativePath="..\..\samples\process.cc"
>
+ <FileConfiguration
+ Name="Debug|x64"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ DisableSpecificWarnings="4267"
+ />
+ </FileConfiguration>
+ <FileConfiguration
+ Name="Release|x64"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ DisableSpecificWarnings="4267"
+ />
+ </FileConfiguration>
</File>
</Files>
<Globals>
diff --git a/deps/v8/tools/visual_studio/v8_shell_sample.vcproj b/deps/v8/tools/visual_studio/v8_shell_sample.vcproj
index 2cbd22df6..b1e5f0178 100644
--- a/deps/v8/tools/visual_studio/v8_shell_sample.vcproj
+++ b/deps/v8/tools/visual_studio/v8_shell_sample.vcproj
@@ -70,9 +70,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
@@ -131,9 +128,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
diff --git a/deps/v8/tools/visual_studio/v8_shell_sample_arm.vcproj b/deps/v8/tools/visual_studio/v8_shell_sample_arm.vcproj
index ba7e0e055..a14c91a45 100644
--- a/deps/v8/tools/visual_studio/v8_shell_sample_arm.vcproj
+++ b/deps/v8/tools/visual_studio/v8_shell_sample_arm.vcproj
@@ -1,151 +1,145 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
- ProjectType="Visual C++"
- Version="8.00"
- Name="v8_shell_sample"
- ProjectGUID="{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}"
- RootNamespace="v8_shell_sample"
- Keyword="Win32Proj"
- >
- <Platforms>
- <Platform
- Name="Win32"
- />
- </Platforms>
- <ToolFiles>
- </ToolFiles>
- <Configurations>
- <Configuration
- Name="Debug|Win32"
- ConfigurationType="1"
- InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\debug.vsprops"
- >
- <Tool
- Name="VCPreBuildEventTool"
- />
- <Tool
- Name="VCCustomBuildTool"
- />
- <Tool
- Name="VCXMLDataGeneratorTool"
- />
- <Tool
- Name="VCWebServiceProxyGeneratorTool"
- />
- <Tool
- Name="VCMIDLTool"
- />
- <Tool
- Name="VCCLCompilerTool"
- />
- <Tool
- Name="VCManagedResourceCompilerTool"
- />
- <Tool
- Name="VCResourceCompilerTool"
- />
- <Tool
- Name="VCPreLinkEventTool"
- />
- <Tool
- Name="VCLinkerTool"
- AdditionalDependencies="winmm.lib Ws2_32.lib"
- />
- <Tool
- Name="VCALinkTool"
- />
- <Tool
- Name="VCManifestTool"
- />
- <Tool
- Name="VCXDCMakeTool"
- />
- <Tool
- Name="VCBscMakeTool"
- />
- <Tool
- Name="VCFxCopTool"
- />
- <Tool
- Name="VCAppVerifierTool"
- />
- <Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
- Name="VCPostBuildEventTool"
- />
- </Configuration>
- <Configuration
- Name="Release|Win32"
- ConfigurationType="1"
- InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\release.vsprops"
- >
- <Tool
- Name="VCPreBuildEventTool"
- />
- <Tool
- Name="VCCustomBuildTool"
- />
- <Tool
- Name="VCXMLDataGeneratorTool"
- />
- <Tool
- Name="VCWebServiceProxyGeneratorTool"
- />
- <Tool
- Name="VCMIDLTool"
- />
- <Tool
- Name="VCCLCompilerTool"
- />
- <Tool
- Name="VCManagedResourceCompilerTool"
- />
- <Tool
- Name="VCResourceCompilerTool"
- />
- <Tool
- Name="VCPreLinkEventTool"
- />
- <Tool
- Name="VCLinkerTool"
- AdditionalDependencies="winmm.lib Ws2_32.lib"
- />
- <Tool
- Name="VCALinkTool"
- />
- <Tool
- Name="VCManifestTool"
- />
- <Tool
- Name="VCXDCMakeTool"
- />
- <Tool
- Name="VCBscMakeTool"
- />
- <Tool
- Name="VCFxCopTool"
- />
- <Tool
- Name="VCAppVerifierTool"
- />
- <Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
- Name="VCPostBuildEventTool"
- />
- </Configuration>
- </Configurations>
- <References>
- </References>
- <Files>
- <File
- RelativePath="..\..\samples\shell.cc"
- >
- </File>
- </Files>
- <Globals>
- </Globals>
-</VisualStudioProject>
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+ ProjectType="Visual C++"
+ Version="8.00"
+ Name="v8_shell_sample"
+ ProjectGUID="{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}"
+ RootNamespace="v8_shell_sample"
+ Keyword="Win32Proj"
+ >
+ <Platforms>
+ <Platform
+ Name="Win32"
+ />
+ </Platforms>
+ <ToolFiles>
+ </ToolFiles>
+ <Configurations>
+ <Configuration
+ Name="Debug|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\debug.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ AdditionalDependencies="winmm.lib Ws2_32.lib"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ <Configuration
+ Name="Release|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\release.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ AdditionalDependencies="winmm.lib Ws2_32.lib"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ </Configurations>
+ <References>
+ </References>
+ <Files>
+ <File
+ RelativePath="..\..\samples\shell.cc"
+ >
+ </File>
+ </Files>
+ <Globals>
+ </Globals>
+</VisualStudioProject>
diff --git a/deps/v8/tools/visual_studio/v8_shell_sample_x64.vcproj b/deps/v8/tools/visual_studio/v8_shell_sample_x64.vcproj
index e1d516486..44d7b12c7 100644
--- a/deps/v8/tools/visual_studio/v8_shell_sample_x64.vcproj
+++ b/deps/v8/tools/visual_studio/v8_shell_sample_x64.vcproj
@@ -50,7 +50,6 @@
<Tool
Name="VCLinkerTool"
AdditionalDependencies="winmm.lib Ws2_32.lib"
- TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
@@ -71,9 +70,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
@@ -112,7 +108,6 @@
<Tool
Name="VCLinkerTool"
AdditionalDependencies="winmm.lib Ws2_32.lib"
- TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
@@ -133,9 +128,6 @@
Name="VCAppVerifierTool"
/>
<Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
@@ -146,6 +138,22 @@
<File
RelativePath="..\..\samples\shell.cc"
>
+ <FileConfiguration
+ Name="Debug|x64"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ DisableSpecificWarnings="4267"
+ />
+ </FileConfiguration>
+ <FileConfiguration
+ Name="Release|x64"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ DisableSpecificWarnings="4267"
+ />
+ </FileConfiguration>
</File>
</Files>
<Globals>
diff --git a/deps/v8/tools/visual_studio/x64.vsprops b/deps/v8/tools/visual_studio/x64.vsprops
index 7587acfe9..3371d54c9 100644
--- a/deps/v8/tools/visual_studio/x64.vsprops
+++ b/deps/v8/tools/visual_studio/x64.vsprops
@@ -10,4 +10,8 @@
Name="VCCLCompilerTool"
PreprocessorDefinitions="V8_TARGET_ARCH_X64;V8_NATIVE_REGEXP"
/>
+ <Tool
+ Name="VCLinkerTool"
+ TargetMachine="17"
+ />
</VisualStudioPropertySheet>