author    Ryan Dahl <ry@tinyclouds.org>  2010-02-19 10:29:41 -0800
committer Ryan Dahl <ry@tinyclouds.org>  2010-02-19 10:40:48 -0800
commit    bcf163da27676e26108ec430a392baee84f2831c (patch)
tree      1afc6af6d5b53247b25ecb9f2f14f88c9fd532f5
parent    764783560ed8d2cd523e715567938f2c3afbb8d0 (diff)
download  node-bcf163da27676e26108ec430a392baee84f2831c.tar.gz
Upgrade V8 to 2.1.1
-rw-r--r--  deps/v8/AUTHORS | 2
-rw-r--r--  deps/v8/ChangeLog | 14
-rw-r--r--  deps/v8/SConstruct | 35
-rw-r--r--  deps/v8/include/v8.h | 122
-rw-r--r--  deps/v8/samples/lineprocessor.cc | 2
-rwxr-xr-x  deps/v8/src/SConscript | 20
-rw-r--r--  deps/v8/src/accessors.cc | 37
-rw-r--r--  deps/v8/src/api.cc | 164
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 191
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 9
-rw-r--r--  deps/v8/src/arm/assembler-thumb2-inl.h | 30
-rw-r--r--  deps/v8/src/arm/assembler-thumb2.cc | 227
-rw-r--r--  deps/v8/src/arm/assembler-thumb2.h | 25
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc | 143
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 902
-rw-r--r--  deps/v8/src/arm/codegen-arm.h | 100
-rw-r--r--  deps/v8/src/arm/debug-arm.cc | 10
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc | 25
-rw-r--r--  deps/v8/src/arm/fast-codegen-arm.cc | 188
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 182
-rw-r--r--  deps/v8/src/arm/ic-arm.cc | 214
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 264
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 127
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 29
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 467
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.cc | 12
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.h | 9
-rw-r--r--  deps/v8/src/array.js | 5
-rw-r--r--  deps/v8/src/assembler.cc | 10
-rw-r--r--  deps/v8/src/assembler.h | 16
-rw-r--r--  deps/v8/src/ast.h | 39
-rw-r--r--  deps/v8/src/bootstrapper.cc | 177
-rw-r--r--  deps/v8/src/bootstrapper.h | 3
-rw-r--r--  deps/v8/src/builtins.cc | 499
-rw-r--r--  deps/v8/src/builtins.h | 10
-rw-r--r--  deps/v8/src/checks.h | 4
-rw-r--r--  deps/v8/src/code-stubs.cc | 16
-rw-r--r--  deps/v8/src/code-stubs.h | 5
-rw-r--r--  deps/v8/src/codegen-inl.h | 41
-rw-r--r--  deps/v8/src/codegen.cc | 75
-rw-r--r--  deps/v8/src/codegen.h | 41
-rwxr-xr-x  deps/v8/src/compiler.cc | 145
-rw-r--r--  deps/v8/src/compiler.h | 129
-rw-r--r--  deps/v8/src/contexts.h | 1
-rw-r--r--  deps/v8/src/d8-readline.cc | 4
-rw-r--r--  deps/v8/src/data-flow.cc | 318
-rw-r--r--  deps/v8/src/data-flow.h | 62
-rw-r--r--  deps/v8/src/debug-delay.js | 8
-rw-r--r--  deps/v8/src/debug.cc | 14
-rw-r--r--  deps/v8/src/disassembler.cc | 2
-rw-r--r--  deps/v8/src/execution.cc | 2
-rw-r--r--  deps/v8/src/fast-codegen.cc | 225
-rw-r--r--  deps/v8/src/fast-codegen.h | 94
-rw-r--r--  deps/v8/src/flag-definitions.h | 4
-rw-r--r--  deps/v8/src/frame-element.cc | 4
-rw-r--r--  deps/v8/src/frame-element.h | 43
-rw-r--r--  deps/v8/src/frames-inl.h | 2
-rw-r--r--  deps/v8/src/frames.cc | 7
-rw-r--r--  deps/v8/src/full-codegen.cc | 105
-rw-r--r--  deps/v8/src/full-codegen.h | 23
-rw-r--r--  deps/v8/src/globals.h | 5
-rw-r--r--  deps/v8/src/handles.cc | 47
-rw-r--r--  deps/v8/src/handles.h | 2
-rw-r--r--  deps/v8/src/heap.cc | 19
-rw-r--r--  deps/v8/src/heap.h | 2
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 59
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 5
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 135
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 1644
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h | 161
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc | 3
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc | 1
-rw-r--r--  deps/v8/src/ia32/fast-codegen-ia32.cc | 195
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 194
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 462
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 149
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 57
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 634
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.cc | 107
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.h | 23
-rw-r--r--  deps/v8/src/ic.cc | 94
-rw-r--r--  deps/v8/src/ic.h | 32
-rw-r--r--  deps/v8/src/json-delay.js | 36
-rw-r--r--  deps/v8/src/jump-target-inl.h | 3
-rw-r--r--  deps/v8/src/jump-target.cc | 50
-rw-r--r--  deps/v8/src/liveedit.cc | 87
-rw-r--r--  deps/v8/src/liveedit.h | 78
-rw-r--r--  deps/v8/src/log-utils.cc | 9
-rw-r--r--  deps/v8/src/log-utils.h | 3
-rw-r--r--  deps/v8/src/log.cc | 98
-rw-r--r--  deps/v8/src/log.h | 17
-rw-r--r--  deps/v8/src/macro-assembler.h | 9
-rw-r--r--  deps/v8/src/math.js | 2
-rw-r--r--  deps/v8/src/messages.js | 2
-rw-r--r--  deps/v8/src/mips/assembler-mips-inl.h | 215
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc | 1208
-rw-r--r--  deps/v8/src/mips/assembler-mips.h | 663
-rw-r--r--  deps/v8/src/mips/builtins-mips.cc | 109
-rw-r--r--  deps/v8/src/mips/codegen-mips-inl.h | 56
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc | 501
-rw-r--r--  deps/v8/src/mips/codegen-mips.h | 311
-rw-r--r--  deps/v8/src/mips/constants-mips.cc | 323
-rw-r--r--  deps/v8/src/mips/constants-mips.h | 525
-rw-r--r--  deps/v8/src/mips/cpu-mips.cc | 69
-rw-r--r--  deps/v8/src/mips/debug-mips.cc | 112
-rw-r--r--  deps/v8/src/mips/disasm-mips.cc | 784
-rw-r--r--  deps/v8/src/mips/fast-codegen-mips.cc | 56
-rw-r--r--  deps/v8/src/mips/frames-mips.cc | 100
-rw-r--r--  deps/v8/src/mips/frames-mips.h | 164
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc | 268
-rw-r--r--  deps/v8/src/mips/ic-mips.cc | 187
-rw-r--r--  deps/v8/src/mips/jump-target-mips.cc | 87
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 895
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h | 381
-rw-r--r--  deps/v8/src/mips/register-allocator-mips-inl.h | 137
-rw-r--r--  deps/v8/src/mips/register-allocator-mips.cc | 60
-rw-r--r--  deps/v8/src/mips/register-allocator-mips.h | 46
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc | 1648
-rw-r--r--  deps/v8/src/mips/simulator-mips.h | 311
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc | 384
-rw-r--r--  deps/v8/src/mips/virtual-frame-mips.cc | 240
-rw-r--r--  deps/v8/src/mips/virtual-frame-mips.h | 548
-rw-r--r--  deps/v8/src/mirror-delay.js | 26
-rw-r--r--  deps/v8/src/number-info.h | 72
-rw-r--r--  deps/v8/src/objects-debug.cc | 2
-rw-r--r--  deps/v8/src/objects-inl.h | 7
-rw-r--r--  deps/v8/src/objects.cc | 227
-rw-r--r--  deps/v8/src/objects.h | 11
-rw-r--r--  deps/v8/src/parser.cc | 3
-rw-r--r--  deps/v8/src/platform-linux.cc | 17
-rw-r--r--  deps/v8/src/property.cc | 2
-rw-r--r--  deps/v8/src/property.h | 27
-rw-r--r--  deps/v8/src/register-allocator-inl.h | 35
-rw-r--r--  deps/v8/src/register-allocator.cc | 23
-rw-r--r--  deps/v8/src/register-allocator.h | 37
-rw-r--r--  deps/v8/src/runtime.cc | 184
-rw-r--r--  deps/v8/src/runtime.h | 9
-rw-r--r--  deps/v8/src/runtime.js | 63
-rw-r--r--  deps/v8/src/simulator.h | 2
-rw-r--r--  deps/v8/src/string.js | 18
-rw-r--r--  deps/v8/src/stub-cache.cc | 20
-rw-r--r--  deps/v8/src/stub-cache.h | 32
-rw-r--r--  deps/v8/src/top.cc | 7
-rw-r--r--  deps/v8/src/top.h | 1
-rw-r--r--  deps/v8/src/utils.cc | 37
-rw-r--r--  deps/v8/src/utils.h | 42
-rw-r--r--  deps/v8/src/v8-counters.h | 134
-rw-r--r--  deps/v8/src/v8.cc | 5
-rw-r--r--  deps/v8/src/v8natives.js | 156
-rw-r--r--  deps/v8/src/version.cc | 4
-rw-r--r--  deps/v8/src/virtual-frame.cc | 46
-rw-r--r--  deps/v8/src/virtual-frame.h | 2
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 130
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 17
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 145
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 502
-rw-r--r--  deps/v8/src/x64/codegen-x64.h | 67
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc | 95
-rw-r--r--  deps/v8/src/x64/fast-codegen-x64.cc | 196
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 148
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 233
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 144
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 44
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 218
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.cc | 87
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.h | 30
-rw-r--r--  deps/v8/test/cctest/SConscript | 6
-rw-r--r--  deps/v8/test/cctest/cctest.status | 20
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 894
-rw-r--r--  deps/v8/test/cctest/test-assembler-mips.cc | 257
-rw-r--r--  deps/v8/test/cctest/test-compiler.cc | 27
-rw-r--r--  deps/v8/test/cctest/test-debug.cc | 50
-rw-r--r--  deps/v8/test/cctest/test-log.cc | 324
-rw-r--r--  deps/v8/test/cctest/test-regexp.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-utils.cc | 105
-rw-r--r--  deps/v8/test/es5conform/es5conform.status | 80
-rw-r--r--  deps/v8/test/message/message.status | 5
-rw-r--r--  deps/v8/test/mjsunit/array-functions-prototype-misc.js | 314
-rw-r--r--  deps/v8/test/mjsunit/array-shift.js | 71
-rw-r--r--  deps/v8/test/mjsunit/array-slice.js | 162
-rw-r--r--  deps/v8/test/mjsunit/array-splice.js | 535
-rw-r--r--  deps/v8/test/mjsunit/array-unshift.js | 132
-rw-r--r--  deps/v8/test/mjsunit/bugs/618.js | 86
-rw-r--r--  deps/v8/test/mjsunit/codegen-coverage.js | 92
-rw-r--r--  deps/v8/test/mjsunit/compiler/assignment.js | 264
-rw-r--r--  deps/v8/test/mjsunit/compiler/simple-bailouts.js | 127
-rw-r--r--  deps/v8/test/mjsunit/compiler/simple-binary-op.js | 40
-rw-r--r--  deps/v8/test/mjsunit/compiler/simple-global-access.js | 53
-rw-r--r--  deps/v8/test/mjsunit/compiler/this-property-refs.js | 64
-rw-r--r--  deps/v8/test/mjsunit/debug-compile-event.js | 6
-rw-r--r--  deps/v8/test/mjsunit/div-mod.js | 16
-rw-r--r--  deps/v8/test/mjsunit/fuzz-natives.js | 19
-rw-r--r--  deps/v8/test/mjsunit/json.js | 32
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.status | 4
-rw-r--r--  deps/v8/test/mjsunit/object-define-properties.js | 56
-rw-r--r--  deps/v8/test/mjsunit/object-define-property.js | 499
-rw-r--r--  deps/v8/test/mjsunit/object-get-own-property-names.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-603.js | 49
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-612.js | 44
-rw-r--r--  deps/v8/test/mjsunit/setter-on-constructor-prototype.js | 111
-rwxr-xr-x [-rw-r--r--]  deps/v8/test/mjsunit/substr.js | 78
-rw-r--r--  deps/v8/test/mjsunit/tools/logreader.js | 16
-rw-r--r--  deps/v8/test/mjsunit/tools/tickprocessor.js | 4
-rw-r--r--  deps/v8/test/mjsunit/typeof.js | 2
-rw-r--r--  deps/v8/test/sputnik/sputnik.status | 5
-rw-r--r--  deps/v8/tools/gyp/v8.gyp | 3
-rw-r--r--  deps/v8/tools/linux-tick-processor | 11
-rw-r--r--  deps/v8/tools/logreader.js | 45
-rw-r--r--  deps/v8/tools/tickprocessor.js | 13
-rw-r--r--  deps/v8/tools/visual_studio/v8_base.vcproj | 12
-rw-r--r--  deps/v8/tools/visual_studio/v8_base_arm.vcproj | 12
-rw-r--r--  deps/v8/tools/visual_studio/v8_base_x64.vcproj | 12
-rw-r--r--  deps/v8/tools/windows-tick-processor.bat | 26
213 files changed, 23731 insertions, 4858 deletions
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 5d712fc27..8b0db5c37 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -4,6 +4,7 @@
# Name/Organization <email address>
Google Inc.
+Sigma Designs Inc.
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexandre Vassalotti <avassalotti@gmail.com>
@@ -22,3 +23,4 @@ Rene Rebe <rene@exactcode.de>
Ryan Dahl <coldredlemur@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Subrato K De <subratokde@codeaurora.org>
+Dineel D Sule <dsule@codeaurora.org>
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 29ecccd7d..8d4cd2233 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,17 @@
+2010-02-19: Version 2.1.1
+
+ [ES5] Implemented Object.defineProperty.
+
+ Improved profiler support.
+
+ Added SetPrototype method in the public V8 API.
+
+ Added GetScriptOrigin and GetScriptLineNumber methods to Function
+ objects in the API.
+
+ Performance improvements on all platforms.
+
+
2010-02-03: Version 2.1.0
Values are now always wrapped in objects when used as a receiver.
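The Object.defineProperty entry above is the headline ES5 change in 2.1.1. Below is a minimal embedding sketch, not part of this commit, that exercises it through the V8 2.x C++ API of the day; the source string and exit-code convention are illustrative.

// Hypothetical embedder snippet: define a non-writable property and check
// that a later assignment is silently ignored (non-strict ES5 semantics).
#include <v8.h>

int main() {
  v8::HandleScope handle_scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  v8::Context::Scope context_scope(context);

  v8::Handle<v8::String> source = v8::String::New(
      "var o = {};"
      "Object.defineProperty(o, 'x', { value: 42, writable: false });"
      "o.x = 7;"  // silently ignored: x is non-writable
      "o.x");
  v8::Handle<v8::Value> result = v8::Script::Compile(source)->Run();

  context.Dispose();
  return result->Int32Value() == 42 ? 0 : 1;  // expect 42, exit code 0
}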
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index 98fc22fba..5483663fd 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -191,6 +191,17 @@ LIBRARY_FLAGS = {
'armvariant:arm': {
'CPPDEFINES': ['V8_ARM_VARIANT_ARM']
},
+ 'arch:mips': {
+ 'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
+ 'simulator:none': {
+ 'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
+ 'LDFLAGS': ['-EL']
+ }
+ },
+ 'simulator:mips': {
+ 'CCFLAGS': ['-m32'],
+ 'LINKFLAGS': ['-m32']
+ },
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
'CCFLAGS': ['-m64'],
@@ -292,6 +303,9 @@ V8_EXTRA_FLAGS = {
# used by the arm simulator.
'WARNINGFLAGS': ['/wd4996']
},
+ 'arch:mips': {
+ 'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
+ },
'disassembler:on': {
'CPPDEFINES': ['ENABLE_DISASSEMBLER']
}
@@ -457,10 +471,22 @@ SAMPLE_FLAGS = {
'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64']
},
+ 'arch:mips': {
+ 'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
+ 'simulator:none': {
+ 'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
+ 'LINKFLAGS': ['-EL'],
+ 'LDFLAGS': ['-EL']
+ }
+ },
'simulator:arm': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
+ 'simulator:mips': {
+ 'CCFLAGS': ['-m32'],
+ 'LINKFLAGS': ['-m32']
+ },
'mode:release': {
'CCFLAGS': ['-O2']
},
@@ -601,7 +627,7 @@ SIMPLE_OPTIONS = {
'help': 'the os to build for (' + OS_GUESS + ')'
},
'arch': {
- 'values':['arm', 'ia32', 'x64'],
+ 'values':['arm', 'ia32', 'x64', 'mips'],
'default': ARCH_GUESS,
'help': 'the architecture to build for (' + ARCH_GUESS + ')'
},
@@ -651,7 +677,7 @@ SIMPLE_OPTIONS = {
'help': 'use Microsoft Visual C++ link-time code generation'
},
'simulator': {
- 'values': ['arm', 'none'],
+ 'values': ['arm', 'mips', 'none'],
'default': 'none',
'help': 'build with simulator'
},
@@ -871,6 +897,11 @@ def PostprocessOptions(options):
options['armvariant'] = 'arm'
if (options['armvariant'] != 'none' and options['arch'] != 'arm'):
options['armvariant'] = 'none'
+ if options['arch'] == 'mips':
+ if ('regexp' in ARGUMENTS) and options['regexp'] == 'native':
+ # Print a warning if native regexp is specified for mips
+ print "Warning: forcing regexp to interpreted for mips"
+ options['regexp'] = 'interpreted'
def ParseEnvOverrides(arg, imports):
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 6125286e8..13f819170 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -534,51 +534,76 @@ class V8EXPORT ScriptOrigin {
class V8EXPORT Script {
public:
- /**
- * Compiles the specified script. The ScriptOrigin* and ScriptData*
- * parameters are owned by the caller of Script::Compile. No
- * references to these objects are kept after compilation finishes.
- *
- * The script object returned is context independent; when run it
- * will use the currently entered context.
- */
- static Local<Script> New(Handle<String> source,
- ScriptOrigin* origin = NULL,
- ScriptData* pre_data = NULL);
+ /**
+ * Compiles the specified script (context-independent).
+ *
+ * \param source Script source code.
+ * \param origin Script origin, owned by caller, no references are kept
+ * when New() returns
+ * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
+ * using pre_data speeds compilation if it's done multiple times.
+ * Owned by caller, no references are kept when New() returns.
+ * \param script_data Arbitrary data associated with script. Using
+ * this has same effect as calling SetData(), but allows data to be
+ * available to compile event handlers.
+ * \return Compiled script object (context independent; when run it
+ * will use the currently entered context).
+ */
+ static Local<Script> New(Handle<String> source,
+ ScriptOrigin* origin = NULL,
+ ScriptData* pre_data = NULL,
+ Handle<String> script_data = Handle<String>());
- /**
- * Compiles the specified script using the specified file name
- * object (typically a string) as the script's origin.
- *
- * The script object returned is context independent; when run it
- * will use the currently entered context.
- */
- static Local<Script> New(Handle<String> source,
- Handle<Value> file_name);
-
- /**
- * Compiles the specified script. The ScriptOrigin* and ScriptData*
- * parameters are owned by the caller of Script::Compile. No
- * references to these objects are kept after compilation finishes.
+ /**
+ * Compiles the specified script using the specified file name
+ * object (typically a string) as the script's origin.
+ *
+ * \param source Script source code.
+ * \param file_name file name object (typically a string) to be used
+ * as the script's origin.
+ * \return Compiled script object (context independent; when run it
+ * will use the currently entered context).
+ */
+ static Local<Script> New(Handle<String> source,
+ Handle<Value> file_name);
+
+ /**
+ * Compiles the specified script (bound to current context).
*
- * The script object returned is bound to the context that was active
- * when this function was called. When run it will always use this
- * context.
+ * \param source Script source code.
+ * \param origin Script origin, owned by caller, no references are kept
+ * when Compile() returns
+ * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
+ * using pre_data speeds compilation if it's done multiple times.
+ * Owned by caller, no references are kept when Compile() returns.
+ * \param script_data Arbitrary data associated with script. Using
+ * this has same effect as calling SetData(), but makes data available
+ * earlier (i.e. to compile event handlers).
+ * \return Compiled script object, bound to the context that was active
+ * when this function was called. When run it will always use this
+ * context.
*/
static Local<Script> Compile(Handle<String> source,
ScriptOrigin* origin = NULL,
- ScriptData* pre_data = NULL);
+ ScriptData* pre_data = NULL,
+ Handle<String> script_data = Handle<String>());
/**
* Compiles the specified script using the specified file name
* object (typically a string) as the script's origin.
*
- * The script object returned is bound to the context that was active
- * when this function was called. When run it will always use this
- * context.
+ * \param source Script source code.
+ * \param file_name File name to use as script's origin
+ * \param script_data Arbitrary data associated with script. Using
+ * this has same effect as calling SetData(), but makes data available
+ * earlier (i.e. to compile event handlers).
+ * \return Compiled script object, bound to the context that was active
+ * when this function was called. When run it will always use this
+ * context.
*/
static Local<Script> Compile(Handle<String> source,
- Handle<Value> file_name);
+ Handle<Value> file_name,
+ Handle<String> script_data = Handle<String>());
/**
* Runs the script returning the resulting value. If the script is
@@ -1197,6 +1222,13 @@ class V8EXPORT Object : public Value {
Local<Value> GetPrototype();
/**
+ * Set the prototype object. This does not skip objects marked to
+ * be skipped by __proto__ and it does not consult the security
+ * handler.
+ */
+ bool SetPrototype(Handle<Value> prototype);
+
+ /**
* Finds an instance of the given function template in the prototype
* chain.
*/
@@ -1354,7 +1386,15 @@ class V8EXPORT Function : public Object {
Local<Value> Call(Handle<Object> recv, int argc, Handle<Value> argv[]);
void SetName(Handle<String> name);
Handle<Value> GetName() const;
+
+ /**
+ * Returns zero based line number of function body and
+ * kLineOffsetNotFound if no information available.
+ */
+ int GetScriptLineNumber() const;
+ ScriptOrigin GetScriptOrigin() const;
static inline Function* Cast(Value* obj);
+ static const int kLineOffsetNotFound;
private:
Function();
static void CheckCast(Value* obj);
@@ -2309,22 +2349,30 @@ class V8EXPORT V8 {
static bool IsProfilerPaused();
/**
- * Resumes specified profiler modules.
+ * Resumes specified profiler modules. Can be called several times to
+ * mark the opening of a profiler events block with the given tag.
+ *
* "ResumeProfiler" is equivalent to "ResumeProfilerEx(PROFILER_MODULE_CPU)".
* See ProfilerModules enum.
*
* \param flags Flags specifying profiler modules.
+ * \param tag Profile tag.
*/
- static void ResumeProfilerEx(int flags);
+ static void ResumeProfilerEx(int flags, int tag = 0);
/**
- * Pauses specified profiler modules.
+ * Pauses specified profiler modules. Each call to "PauseProfilerEx" closes
+ * a block of profiler events opened by a call to "ResumeProfilerEx" with the
+ * same tag value. There is no need for blocks to be properly nested.
+ * The profiler is paused when the last opened block is closed.
+ *
* "PauseProfiler" is equivalent to "PauseProfilerEx(PROFILER_MODULE_CPU)".
* See ProfilerModules enum.
*
* \param flags Flags specifying profiler modules.
+ * \param tag Profile tag.
*/
- static void PauseProfilerEx(int flags);
+ static void PauseProfilerEx(int flags, int tag = 0);
/**
* Returns active (resumed) profiler modules.
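Taken together, the v8.h changes above add an optional script_data argument to Script::New and Script::Compile, an Object::SetPrototype, script introspection on Function, and tagged profiler blocks. The sketch below shows how an embedder might use them; it assumes the 2.1.1 headers from this diff, and the source text, file name, and tag value are made up.

// Hypothetical usage of the new 2.1.1 API surface.
#include <v8.h>

int main() {
  v8::HandleScope scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  v8::Context::Scope context_scope(context);

  // New third argument: same effect as SetData(), but the data is already
  // visible to compile event handlers while compilation runs.
  v8::Handle<v8::Script> script = v8::Script::Compile(
      v8::String::New("function f() { return 1; }\nf"),
      v8::String::New("example.js"),        // file_name (script origin)
      v8::String::New("embedder-data"));    // script_data (new)

  v8::Handle<v8::Function> f =
      v8::Handle<v8::Function>::Cast(script->Run());

  // New Function introspection: zero-based line of the function body, or
  // Function::kLineOffsetNotFound (-1) when nothing is recorded.
  int line = f->GetScriptLineNumber();      // expected 0 for this source
  v8::ScriptOrigin origin = f->GetScriptOrigin();
  (void)origin;

  // New Object::SetPrototype: unlike a __proto__ assignment, it does not
  // skip objects hidden from __proto__ or consult the security handler.
  v8::Handle<v8::Object> obj = v8::Object::New();
  obj->SetPrototype(f);

  // Tagged profiler blocks: PauseProfilerEx with the same tag closes the
  // block opened by ResumeProfilerEx; blocks need not nest.
  v8::V8::ResumeProfilerEx(v8::PROFILER_MODULE_CPU, /*tag=*/1);
  v8::V8::PauseProfilerEx(v8::PROFILER_MODULE_CPU, /*tag=*/1);

  context.Dispose();
  return line;
}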
diff --git a/deps/v8/samples/lineprocessor.cc b/deps/v8/samples/lineprocessor.cc
index 505dabf94..61517d36e 100644
--- a/deps/v8/samples/lineprocessor.cc
+++ b/deps/v8/samples/lineprocessor.cc
@@ -152,7 +152,7 @@ int RunMain(int argc, char* argv[]) {
} else if (strcmp(str, "--main-cycle-in-js") == 0) {
cycle_type = CycleInJs;
} else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
- port_number = atoi(argv[i + 1]);
+ port_number = atoi(argv[i + 1]); // NOLINT
i++;
} else if (strncmp(str, "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", str);
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index ebda77ac2..1a81cc73c 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -72,6 +72,7 @@ SOURCES = {
interpreter-irregexp.cc
jsregexp.cc
jump-target.cc
+ liveedit.cc
log-utils.cc
log.cc
mark-compact.cc
@@ -131,6 +132,24 @@ SOURCES = {
'armvariant:thumb2': Split("""
arm/assembler-thumb2.cc
"""),
+ 'arch:mips': Split("""
+ mips/assembler-mips.cc
+ mips/builtins-mips.cc
+ mips/codegen-mips.cc
+ mips/constants-mips.cc
+ mips/cpu-mips.cc
+ mips/debug-mips.cc
+ mips/disasm-mips.cc
+ mips/fast-codegen-mips.cc
+ mips/full-codegen-mips.cc
+ mips/frames-mips.cc
+ mips/ic-mips.cc
+ mips/jump-target-mips.cc
+ mips/macro-assembler-mips.cc
+ mips/register-allocator-mips.cc
+ mips/stub-cache-mips.cc
+ mips/virtual-frame-mips.cc
+ """),
'arch:ia32': Split("""
ia32/assembler-ia32.cc
ia32/builtins-ia32.cc
@@ -168,6 +187,7 @@ SOURCES = {
x64/virtual-frame-x64.cc
"""),
'simulator:arm': ['arm/simulator-arm.cc'],
+ 'simulator:mips': ['mips/simulator-mips.cc'],
'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'],
'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 5a029285e..b05719edb 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -647,42 +647,9 @@ Object* Accessors::ObjectGetPrototype(Object* receiver, void*) {
Object* Accessors::ObjectSetPrototype(JSObject* receiver,
Object* value,
void*) {
- // Before we can set the prototype we need to be sure
- // prototype cycles are prevented.
- // It is sufficient to validate that the receiver is not in the new prototype
- // chain.
-
- // Silently ignore the change if value is not a JSObject or null.
- // SpiderMonkey behaves this way.
- if (!value->IsJSObject() && !value->IsNull()) return value;
-
- for (Object* pt = value; pt != Heap::null_value(); pt = pt->GetPrototype()) {
- if (JSObject::cast(pt) == receiver) {
- // Cycle detected.
- HandleScope scope;
- return Top::Throw(*Factory::NewError("cyclic_proto",
- HandleVector<Object>(NULL, 0)));
- }
- }
-
- // Find the first object in the chain whose prototype object is not
- // hidden and set the new prototype on that object.
- JSObject* current = receiver;
- Object* current_proto = receiver->GetPrototype();
- while (current_proto->IsJSObject() &&
- JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
- current = JSObject::cast(current_proto);
- current_proto = current_proto->GetPrototype();
- }
-
- // Set the new prototype of the object.
- Object* new_map = current->map()->CopyDropTransitions();
- if (new_map->IsFailure()) return new_map;
- Map::cast(new_map)->set_prototype(value);
- current->set_map(Map::cast(new_map));
-
+ const bool skip_hidden_prototypes = true;
// To be consistent with other Set functions, return the value.
- return value;
+ return receiver->SetPrototype(value, skip_hidden_prototypes);
}
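The deleted lines above show what the __proto__ accessor used to do inline: verify that the new prototype chain does not reach back to the receiver, then walk past hidden prototypes before installing the value. That logic now lives in JSObject::SetPrototype, called here with skip_hidden_prototypes set. A toy C++ sketch of the cycle check on an invented object model (not V8 code):

#include <cstdio>

struct Obj {
  Obj* prototype = nullptr;
};

// Refuses the update when the receiver is already on new_proto's chain,
// which is exactly the "cyclic_proto" case the removed code threw on.
bool SetPrototype(Obj* receiver, Obj* new_proto) {
  for (Obj* pt = new_proto; pt != nullptr; pt = pt->prototype) {
    if (pt == receiver) return false;  // cycle detected
  }
  receiver->prototype = new_proto;
  return true;
}

int main() {
  Obj a, b;
  SetPrototype(&a, &b);            // ok: a -> b
  bool ok = SetPrototype(&b, &a);  // rejected: would close the loop
  std::printf("cycle rejected: %s\n", ok ? "no" : "yes");
  return 0;
}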
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 322c90fc5..dbb3d8b74 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -1106,7 +1106,8 @@ ScriptData* ScriptData::New(unsigned* data, int length) {
Local<Script> Script::New(v8::Handle<String> source,
v8::ScriptOrigin* origin,
- v8::ScriptData* script_data) {
+ v8::ScriptData* pre_data,
+ v8::Handle<String> script_data) {
ON_BAILOUT("v8::Script::New()", return Local<Script>());
LOG_API("Script::New");
ENTER_V8;
@@ -1126,20 +1127,17 @@ Local<Script> Script::New(v8::Handle<String> source,
}
}
EXCEPTION_PREAMBLE();
- i::ScriptDataImpl* pre_data = static_cast<i::ScriptDataImpl*>(script_data);
+ i::ScriptDataImpl* pre_data_impl = static_cast<i::ScriptDataImpl*>(pre_data);
// We assert that the pre-data is sane, even though we can actually
// handle it if it turns out not to be in release mode.
- ASSERT(pre_data == NULL || pre_data->SanityCheck());
+ ASSERT(pre_data_impl == NULL || pre_data_impl->SanityCheck());
// If the pre-data isn't sane we simply ignore it
- if (pre_data != NULL && !pre_data->SanityCheck()) {
- pre_data = NULL;
- }
- i::Handle<i::JSFunction> boilerplate = i::Compiler::Compile(str,
- name_obj,
- line_offset,
- column_offset,
- NULL,
- pre_data);
+ if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
+ pre_data_impl = NULL;
+ }
+ i::Handle<i::JSFunction> boilerplate =
+ i::Compiler::Compile(str, name_obj, line_offset, column_offset, NULL,
+ pre_data_impl, Utils::OpenHandle(*script_data));
has_pending_exception = boilerplate.is_null();
EXCEPTION_BAILOUT_CHECK(Local<Script>());
return Local<Script>(ToApi<Script>(boilerplate));
@@ -1155,11 +1153,12 @@ Local<Script> Script::New(v8::Handle<String> source,
Local<Script> Script::Compile(v8::Handle<String> source,
v8::ScriptOrigin* origin,
- v8::ScriptData* script_data) {
+ v8::ScriptData* pre_data,
+ v8::Handle<String> script_data) {
ON_BAILOUT("v8::Script::Compile()", return Local<Script>());
LOG_API("Script::Compile");
ENTER_V8;
- Local<Script> generic = New(source, origin, script_data);
+ Local<Script> generic = New(source, origin, pre_data, script_data);
if (generic.IsEmpty())
return generic;
i::Handle<i::JSFunction> boilerplate = Utils::OpenHandle(*generic);
@@ -1171,9 +1170,10 @@ Local<Script> Script::Compile(v8::Handle<String> source,
Local<Script> Script::Compile(v8::Handle<String> source,
- v8::Handle<Value> file_name) {
+ v8::Handle<Value> file_name,
+ v8::Handle<String> script_data) {
ScriptOrigin origin(file_name);
- return Compile(source, &origin);
+ return Compile(source, &origin, 0, script_data);
}
@@ -2032,6 +2032,19 @@ Local<Value> v8::Object::GetPrototype() {
}
+bool v8::Object::SetPrototype(Handle<Value> value) {
+ ON_BAILOUT("v8::Object::SetPrototype()", return false);
+ ENTER_V8;
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> result = i::SetPrototype(self, value_obj);
+ has_pending_exception = result.is_null();
+ EXCEPTION_BAILOUT_CHECK(false);
+ return true;
+}
+
+
Local<Object> v8::Object::FindInstanceInPrototypeChain(
v8::Handle<FunctionTemplate> tmpl) {
ON_BAILOUT("v8::Object::FindInstanceInPrototypeChain()",
@@ -2194,7 +2207,7 @@ Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::LookupResult lookup;
self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
- if (lookup.IsValid()) {
+ if (lookup.IsProperty()) {
PropertyAttributes attributes;
i::Handle<i::Object> result(self_obj->GetProperty(*self_obj,
&lookup,
@@ -2213,7 +2226,7 @@ Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::LookupResult lookup;
self_obj->LookupRealNamedProperty(*key_obj, &lookup);
- if (lookup.IsValid()) {
+ if (lookup.IsProperty()) {
PropertyAttributes attributes;
i::Handle<i::Object> result(self_obj->GetProperty(*self_obj,
&lookup,
@@ -2445,6 +2458,99 @@ Handle<Value> Function::GetName() const {
}
+ScriptOrigin Function::GetScriptOrigin() const {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ if (func->shared()->script()->IsScript()) {
+ i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+ v8::ScriptOrigin origin(
+ Utils::ToLocal(i::Handle<i::Object>(script->name())),
+ v8::Integer::New(script->line_offset()->value()),
+ v8::Integer::New(script->column_offset()->value()));
+ return origin;
+ }
+ return v8::ScriptOrigin(Handle<Value>());
+}
+
+
+const int Function::kLineOffsetNotFound = -1;
+
+
+int Function::GetScriptLineNumber() const {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ if (func->shared()->script()->IsScript()) {
+ i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+ return i::GetScriptLineNumber(script, func->shared()->start_position());
+ }
+ return kLineOffsetNotFound;
+}
+
+
+namespace {
+
+// Tracks string usage to help make better decisions when
+// externalizing strings.
+//
+// Implementation note: internally this class only tracks fresh
+// strings and keeps a single use counter for them.
+class StringTracker {
+ public:
+ // Records that the given string's characters were copied to some
+ // external buffer. If this happens often we should honor
+ // externalization requests for the string.
+ static void RecordWrite(i::Handle<i::String> string) {
+ i::Address address = reinterpret_cast<i::Address>(*string);
+ i::Address top = i::Heap::NewSpaceTop();
+ if (IsFreshString(address, top)) {
+ IncrementUseCount(top);
+ }
+ }
+
+ // Estimates freshness and use frequency of the given string based
+ // on how close it is to the new space top and the recorded usage
+ // history.
+ static inline bool IsFreshUnusedString(i::Handle<i::String> string) {
+ i::Address address = reinterpret_cast<i::Address>(*string);
+ i::Address top = i::Heap::NewSpaceTop();
+ return IsFreshString(address, top) && IsUseCountLow(top);
+ }
+
+ private:
+ static inline bool IsFreshString(i::Address string, i::Address top) {
+ return top - kFreshnessLimit <= string && string <= top;
+ }
+
+ static inline bool IsUseCountLow(i::Address top) {
+ if (last_top_ != top) return true;
+ return use_count_ < kUseLimit;
+ }
+
+ static inline void IncrementUseCount(i::Address top) {
+ if (last_top_ != top) {
+ use_count_ = 0;
+ last_top_ = top;
+ }
+ ++use_count_;
+ }
+
+ // How close to the new space top a fresh string has to be.
+ static const int kFreshnessLimit = 1024;
+
+ // The number of uses required to consider a string useful.
+ static const int kUseLimit = 32;
+
+ // Single use counter shared by all fresh strings.
+ static int use_count_;
+
+ // Last new space top when the use count above was valid.
+ static i::Address last_top_;
+};
+
+int StringTracker::use_count_ = 0;
+i::Address StringTracker::last_top_ = NULL;
+
+} // namespace
+
+
int String::Length() const {
if (IsDeadCheck("v8::String::Length()")) return 0;
return Utils::OpenHandle(this)->length();
@@ -2462,6 +2568,7 @@ int String::WriteUtf8(char* buffer, int capacity) const {
LOG_API("String::WriteUtf8");
ENTER_V8;
i::Handle<i::String> str = Utils::OpenHandle(this);
+ StringTracker::RecordWrite(str);
write_input_buffer.Reset(0, *str);
int len = str->length();
// Encode the first K - 3 bytes directly into the buffer since we
@@ -2505,6 +2612,7 @@ int String::WriteAscii(char* buffer, int start, int length) const {
ENTER_V8;
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
+ StringTracker::RecordWrite(str);
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
str->TryFlattenIfNotFlat();
@@ -2531,6 +2639,7 @@ int String::Write(uint16_t* buffer, int start, int length) const {
ENTER_V8;
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
+ StringTracker::RecordWrite(str);
int end = length;
if ( (length == -1) || (length > str->length() - start) )
end = str->length() - start;
@@ -3098,6 +3207,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
if (this->IsExternal()) return false; // Already an external string.
ENTER_V8;
i::Handle<i::String> obj = Utils::OpenHandle(this);
+ if (StringTracker::IsFreshUnusedString(obj)) return false;
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
i::ExternalStringTable::AddString(*obj);
@@ -3123,6 +3233,7 @@ bool v8::String::MakeExternal(
if (this->IsExternal()) return false; // Already an external string.
ENTER_V8;
i::Handle<i::String> obj = Utils::OpenHandle(this);
+ if (StringTracker::IsFreshUnusedString(obj)) return false;
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
i::ExternalStringTable::AddString(*obj);
@@ -3134,6 +3245,7 @@ bool v8::String::MakeExternal(
bool v8::String::CanMakeExternal() {
if (IsDeadCheck("v8::String::CanMakeExternal()")) return false;
i::Handle<i::String> obj = Utils::OpenHandle(this);
+ if (StringTracker::IsFreshUnusedString(obj)) return false;
int size = obj->Size(); // Byte size of the original string.
if (size < i::ExternalString::kSize)
return false;
@@ -3357,14 +3469,14 @@ void V8::SetGlobalGCEpilogueCallback(GCCallback callback) {
void V8::PauseProfiler() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- i::Logger::PauseProfiler(PROFILER_MODULE_CPU);
+ PauseProfilerEx(PROFILER_MODULE_CPU);
#endif
}
void V8::ResumeProfiler() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- i::Logger::ResumeProfiler(PROFILER_MODULE_CPU);
+ ResumeProfilerEx(PROFILER_MODULE_CPU);
#endif
}
@@ -3378,7 +3490,7 @@ bool V8::IsProfilerPaused() {
}
-void V8::ResumeProfilerEx(int flags) {
+void V8::ResumeProfilerEx(int flags, int tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (flags & PROFILER_MODULE_HEAP_SNAPSHOT) {
// Snapshot mode: resume modules, perform GC, then pause only
@@ -3388,19 +3500,19 @@ void V8::ResumeProfilerEx(int flags) {
// Reset snapshot flag and CPU module flags.
flags &= ~(PROFILER_MODULE_HEAP_SNAPSHOT | PROFILER_MODULE_CPU);
const int current_flags = i::Logger::GetActiveProfilerModules();
- i::Logger::ResumeProfiler(flags);
+ i::Logger::ResumeProfiler(flags, tag);
i::Heap::CollectAllGarbage(false);
- i::Logger::PauseProfiler(~current_flags & flags);
+ i::Logger::PauseProfiler(~current_flags & flags, tag);
} else {
- i::Logger::ResumeProfiler(flags);
+ i::Logger::ResumeProfiler(flags, tag);
}
#endif
}
-void V8::PauseProfilerEx(int flags) {
+void V8::PauseProfilerEx(int flags, int tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- i::Logger::PauseProfiler(flags);
+ i::Logger::PauseProfiler(flags, tag);
#endif
}
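The StringTracker added above biases externalization decisions: a string allocated within kFreshnessLimit bytes of the new-space top counts as fresh, and MakeExternal()/CanMakeExternal() now refuse fresh strings whose characters have not yet been copied out kUseLimit times. A standalone sketch of the heuristic; the two constants are copied from the diff, everything else is invented for illustration:

#include <cstdint>

using Address = uintptr_t;

const int kFreshnessLimit = 1024;  // bytes below the new-space top
const int kUseLimit = 32;          // copies before externalization pays off

int use_count = 0;     // single counter shared by all fresh strings
Address last_top = 0;  // new-space top when use_count was last valid

bool IsFreshString(Address str, Address top) {
  return top - kFreshnessLimit <= str && str <= top;
}

// Called from the Write*() paths whenever characters are copied out.
void RecordWrite(Address str, Address top) {
  if (!IsFreshString(str, top)) return;
  if (last_top != top) { use_count = 0; last_top = top; }
  ++use_count;
}

// MakeExternal()/CanMakeExternal() bail out when this returns true.
bool IsFreshUnusedString(Address str, Address top) {
  if (!IsFreshString(str, top)) return false;
  return last_top != top || use_count < kUseLimit;
}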
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 74547be6e..c79aac656 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -51,9 +51,14 @@ void CpuFeatures::Probe() {
// If the compiler is allowed to use vfp then we can use vfp too in our
// code generation.
#if !defined(__arm__)
- // For the simulator=arm build, always use VFP since the arm simulator has
- // VFP support.
- supported_ |= 1u << VFP3;
+ // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
+ if (FLAG_enable_vfp3) {
+ supported_ |= 1u << VFP3;
+ }
+ // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
+ if (FLAG_enable_armv7) {
+ supported_ |= 1u << ARMv7;
+ }
#else
if (Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
@@ -66,6 +71,11 @@ void CpuFeatures::Probe() {
supported_ |= 1u << VFP3;
found_by_runtime_probing_ |= 1u << VFP3;
}
+
+ if (OS::ArmCpuHasFeature(ARMv7)) {
+ supported_ |= 1u << ARMv7;
+ found_by_runtime_probing_ |= 1u << ARMv7;
+ }
#endif
}
@@ -83,9 +93,9 @@ Register r4 = { 4 };
Register r5 = { 5 };
Register r6 = { 6 };
Register r7 = { 7 };
-Register r8 = { 8 };
+Register r8 = { 8 }; // Used as context register.
Register r9 = { 9 };
-Register r10 = { 10 };
+Register r10 = { 10 }; // Used as roots register.
Register fp = { 11 };
Register ip = { 12 };
Register sp = { 13 };
@@ -264,9 +274,9 @@ MemOperand::MemOperand(Register rn, Register rm,
// -----------------------------------------------------------------------------
-// Implementation of Assembler
+// Implementation of Assembler.
-// Instruction encoding bits
+// Instruction encoding bits.
enum {
H = 1 << 5, // halfword (or byte)
S6 = 1 << 6, // signed (or unsigned)
@@ -299,14 +309,14 @@ enum {
B26 = 1 << 26,
B27 = 1 << 27,
- // Instruction bit masks
+ // Instruction bit masks.
RdMask = 15 << 12, // in str instruction
CondMask = 15 << 28,
CoprocessorMask = 15 << 8,
OpCodeMask = 15 << 21, // in data-processing instructions
Imm24Mask = (1 << 24) - 1,
Off12Mask = (1 << 12) - 1,
- // Reserved condition
+ // Reserved condition.
nv = 15 << 28
};
@@ -327,13 +337,13 @@ const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
// ldr pc, [pc, #XXX]
const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
-// spare_buffer_
+// Spare buffer.
static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;
Assembler::Assembler(void* buffer, int buffer_size) {
if (buffer == NULL) {
- // do our own buffer management
+ // Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
@@ -351,14 +361,14 @@ Assembler::Assembler(void* buffer, int buffer_size) {
own_buffer_ = true;
} else {
- // use externally provided buffer instead
+ // Use externally provided buffer instead.
ASSERT(buffer_size > 0);
buffer_ = static_cast<byte*>(buffer);
buffer_size_ = buffer_size;
own_buffer_ = false;
}
- // setup buffer pointers
+ // Setup buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -386,11 +396,11 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
- // emit constant pool if necessary
+ // Emit constant pool if necessary.
CheckConstPool(true, false);
ASSERT(num_prinfo_ == 0);
- // setup desc
+ // Setup code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
@@ -539,7 +549,7 @@ void Assembler::bind_to(Label* L, int pos) {
void Assembler::link_to(Label* L, Label* appendix) {
if (appendix->is_linked()) {
if (L->is_linked()) {
- // append appendix to L's list
+ // Append appendix to L's list.
int fixup_pos;
int link = L->pos();
do {
@@ -549,7 +559,7 @@ void Assembler::link_to(Label* L, Label* appendix) {
ASSERT(link == kEndOfChain);
target_at_put(fixup_pos, appendix->pos());
} else {
- // L is empty, simply use appendix
+ // L is empty, simply use appendix.
*L = *appendix;
}
}
@@ -575,12 +585,12 @@ void Assembler::next(Label* L) {
}
-// Low-level code emission routines depending on the addressing mode
+// Low-level code emission routines depending on the addressing mode.
static bool fits_shifter(uint32_t imm32,
uint32_t* rotate_imm,
uint32_t* immed_8,
Instr* instr) {
- // imm32 must be unsigned
+ // imm32 must be unsigned.
for (int rot = 0; rot < 16; rot++) {
uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
if ((imm8 <= 0xff)) {
@@ -589,7 +599,7 @@ static bool fits_shifter(uint32_t imm32,
return true;
}
}
- // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
+ // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= 0x2*B21;
@@ -626,7 +636,7 @@ void Assembler::addrmod1(Instr instr,
CheckBuffer();
ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
if (!x.rm_.is_valid()) {
- // immediate
+ // Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
if (MustUseIp(x.rmode_) ||
@@ -634,7 +644,7 @@ void Assembler::addrmod1(Instr instr,
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
// However, if the original instruction is a 'mov rd, x' (not setting the
- // condition code), then replace it with a 'ldr rd, [pc]'
+ // condition code), then replace it with a 'ldr rd, [pc]'.
RecordRelocInfo(x.rmode_, x.imm32_);
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = static_cast<Condition>(instr & CondMask);
@@ -648,16 +658,16 @@ void Assembler::addrmod1(Instr instr,
}
instr |= I | rotate_imm*B8 | immed_8;
} else if (!x.rs_.is_valid()) {
- // immediate shift
+ // Immediate shift.
instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
} else {
- // register shift
+ // Register shift.
ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
}
emit(instr | rn.code()*B16 | rd.code()*B12);
if (rn.is(pc) || x.rm_.is(pc))
- // block constant pool emission for one instruction after reading pc
+ // Block constant pool emission for one instruction after reading pc.
BlockConstPoolBefore(pc_offset() + kInstrSize);
}
@@ -666,15 +676,15 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
ASSERT((instr & ~(CondMask | B | L)) == B26);
int am = x.am_;
if (!x.rm_.is_valid()) {
- // immediate offset
+ // Immediate offset.
int offset_12 = x.offset_;
if (offset_12 < 0) {
offset_12 = -offset_12;
am ^= U;
}
if (!is_uint12(offset_12)) {
- // immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed
+ // Immediate offset cannot be encoded, load it first to register ip
+ // rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC,
static_cast<Condition>(instr & CondMask));
@@ -684,9 +694,9 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
ASSERT(offset_12 >= 0); // no masking needed
instr |= offset_12;
} else {
- // register offset (shift_imm_ and shift_op_ are 0) or scaled
+ // Register offset (shift_imm_ and shift_op_ are 0) or scaled
// register offset the constructors make sure than both shift_imm_
- // and shift_op_ are initialized
+ // and shift_op_ are initialized.
ASSERT(!x.rm_.is(pc));
instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
}
@@ -700,15 +710,15 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
ASSERT(x.rn_.is_valid());
int am = x.am_;
if (!x.rm_.is_valid()) {
- // immediate offset
+ // Immediate offset.
int offset_8 = x.offset_;
if (offset_8 < 0) {
offset_8 = -offset_8;
am ^= U;
}
if (!is_uint8(offset_8)) {
- // immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed
+ // Immediate offset cannot be encoded, load it first to register ip
+ // rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC,
static_cast<Condition>(instr & CondMask));
@@ -718,15 +728,15 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
ASSERT(offset_8 >= 0); // no masking needed
instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
} else if (x.shift_imm_ != 0) {
- // scaled register offset not supported, load index first
- // rn (and rd in a load) should never be ip, or will be trashed
+ // Scaled register offset not supported, load index first
+ // rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
static_cast<Condition>(instr & CondMask));
addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
return;
} else {
- // register offset
+ // Register offset.
ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
instr |= x.rm_.code();
}
@@ -744,7 +754,7 @@ void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
- // unindexed addressing is not encoded by this function
+ // Unindexed addressing is not encoded by this function.
ASSERT_EQ((B27 | B26),
(instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
@@ -759,7 +769,7 @@ void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
- // post-indexed addressing requires W == 1; different than in addrmod2/3
+ // Post-indexed addressing requires W == 1; different than in addrmod2/3.
if ((am & P) == 0)
am |= W;
@@ -782,7 +792,7 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
}
// Block the emission of the constant pool, since the branch instruction must
- // be emitted at the pc offset recorded by the label
+ // be emitted at the pc offset recorded by the label.
BlockConstPoolBefore(pc_offset() + kInstrSize);
return target_pos - (pc_offset() + kPcLoadDelta);
}
@@ -804,7 +814,7 @@ void Assembler::label_at_put(Label* L, int at_offset) {
}
-// Branch instructions
+// Branch instructions.
void Assembler::b(int branch_offset, Condition cond) {
ASSERT((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2;
@@ -812,7 +822,7 @@ void Assembler::b(int branch_offset, Condition cond) {
emit(cond | B27 | B25 | (imm24 & Imm24Mask));
if (cond == al)
- // dead code is a good location to emit the constant pool
+ // Dead code is a good location to emit the constant pool.
CheckConstPool(false, false);
}
@@ -849,7 +859,22 @@ void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
}
-// Data-processing instructions
+// Data-processing instructions.
+
+// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
+// Instruction details available in ARM DDI 0406A, A8-464.
+// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
+// Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
+void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
+ const Operand& src3, Condition cond) {
+ ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
+ ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
+ ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
+ emit(cond | 0x3F*B21 | src3.imm32_*B16 |
+ dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
+}
+
+
void Assembler::and_(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 0*B21 | s, src1, dst, src2);
@@ -886,7 +911,7 @@ void Assembler::add(Register dst, Register src1, const Operand& src2,
if (FLAG_push_pop_elimination &&
last_bound_pos_ <= (pc_offset() - pattern_size) &&
reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
- // pattern
+ // Pattern.
instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
(instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
pc_ -= 2 * kInstrSize;
@@ -960,7 +985,7 @@ void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
}
-// Multiply instructions
+// Multiply instructions.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
SBit s, Condition cond) {
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
@@ -1029,7 +1054,7 @@ void Assembler::umull(Register dstL,
}
-// Miscellaneous arithmetic instructions
+// Miscellaneous arithmetic instructions.
void Assembler::clz(Register dst, Register src, Condition cond) {
// v5 and above.
ASSERT(!dst.is(pc) && !src.is(pc));
@@ -1038,7 +1063,7 @@ void Assembler::clz(Register dst, Register src, Condition cond) {
}
-// Status register access instructions
+// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
ASSERT(!dst.is(pc));
emit(cond | B24 | s | 15*B16 | dst.code()*B12);
@@ -1050,12 +1075,12 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
ASSERT(fields >= B16 && fields < B20); // at least one field set
Instr instr;
if (!src.rm_.is_valid()) {
- // immediate
+ // Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
if (MustUseIp(src.rmode_) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
- // immediate operand cannot be encoded, load it first to register ip
+ // Immediate operand cannot be encoded, load it first to register ip.
RecordRelocInfo(src.rmode_, src.imm32_);
ldr(ip, MemOperand(pc, 0), cond);
msr(fields, Operand(ip), cond);
@@ -1070,7 +1095,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
}
-// Load/Store instructions
+// Load/Store instructions.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
if (dst.is(pc)) {
WriteRecordedPositions();
@@ -1085,7 +1110,7 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
if (FLAG_push_pop_elimination &&
last_bound_pos_ <= (pc_offset() - pattern_size) &&
reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
- // pattern
+ // Pattern.
instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
pc_ -= 2 * kInstrSize;
@@ -1106,6 +1131,7 @@ void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
if (FLAG_push_pop_elimination &&
last_bound_pos_ <= (pc_offset() - pattern_size) &&
reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+ // Pattern.
instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
pc_ -= 2 * kInstrSize;
@@ -1147,17 +1173,17 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
}
-// Load/Store multiple instructions
+// Load/Store multiple instructions.
void Assembler::ldm(BlockAddrMode am,
Register base,
RegList dst,
Condition cond) {
- // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable
+ // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
addrmod4(cond | B27 | am | L, base, dst);
- // emit the constant pool after a function return implemented by ldm ..{..pc}
+ // Emit the constant pool after a function return implemented by ldm ..{..pc}.
if (cond == al && (dst & pc.bit()) != 0) {
// There is a slight chance that the ldm instruction was actually a call,
// in which case it would be wrong to return into the constant pool; we
@@ -1177,7 +1203,7 @@ void Assembler::stm(BlockAddrMode am,
}
-// Semaphore instructions
+// Semaphore instructions.
void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
ASSERT(!dst.is(base) && !src.is(base));
@@ -1197,7 +1223,7 @@ void Assembler::swpb(Register dst,
}
-// Exception-generating instructions and debugging support
+// Exception-generating instructions and debugging support.
void Assembler::stop(const char* msg) {
#if !defined(__arm__)
// The simulator handles these special instructions and stops execution.
@@ -1222,7 +1248,7 @@ void Assembler::swi(uint32_t imm24, Condition cond) {
}
-// Coprocessor instructions
+// Coprocessor instructions.
void Assembler::cdp(Coprocessor coproc,
int opcode_1,
CRegister crd,
@@ -1307,7 +1333,7 @@ void Assembler::ldc(Coprocessor coproc,
int option,
LFlag l,
Condition cond) {
- // unindexed addressing
+ // Unindexed addressing.
ASSERT(is_uint8(option));
emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
coproc*B8 | (option & 255));
@@ -1346,7 +1372,7 @@ void Assembler::stc(Coprocessor coproc,
int option,
LFlag l,
Condition cond) {
- // unindexed addressing
+ // Unindexed addressing.
ASSERT(is_uint8(option));
emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
coproc*B8 | (option & 255));
@@ -1464,7 +1490,7 @@ void Assembler::vcvt(const DwVfpRegister dst,
const Condition cond) {
// Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
// Instruction details available in ARM DDI 0406A, A8-576.
- // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
+ // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
@@ -1571,14 +1597,14 @@ void Assembler::vmrs(Register dst, Condition cond) {
}
-// Pseudo instructions
+// Pseudo instructions.
void Assembler::lea(Register dst,
const MemOperand& x,
SBit s,
Condition cond) {
int am = x.am_;
if (!x.rm_.is_valid()) {
- // immediate offset
+ // Immediate offset.
if ((am & P) == 0) // post indexing
mov(dst, Operand(x.rn_), s, cond);
else if ((am & U) == 0) // negative indexing
@@ -1612,7 +1638,7 @@ void Assembler::BlockConstPoolFor(int instructions) {
}
-// Debugging
+// Debugging.
void Assembler::RecordJSReturn() {
WriteRecordedPositions();
CheckBuffer();
@@ -1665,7 +1691,7 @@ void Assembler::WriteRecordedPositions() {
void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
- // compute new buffer size
+ // Compute new buffer size.
CodeDesc desc; // the new buffer
if (buffer_size_ < 4*KB) {
desc.buffer_size = 4*KB;
@@ -1676,20 +1702,20 @@ void Assembler::GrowBuffer() {
}
CHECK_GT(desc.buffer_size, 0); // no overflow
- // setup new buffer
+ // Setup new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
- // copy the data
+ // Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
memmove(desc.buffer, buffer_, desc.instr_size);
memmove(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.pos(), desc.reloc_size);
- // switch buffers
+ // Switch buffers.
DeleteArray(buffer_);
buffer_ = desc.buffer;
buffer_size_ = desc.buffer_size;
@@ -1697,11 +1723,11 @@ void Assembler::GrowBuffer() {
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
- // none of our relocation types are pc relative pointing outside the code
+ // None of our relocation types are pc relative pointing outside the code
// buffer nor pc absolute pointing inside the code buffer, so there is no need
- // to relocate any emitted relocation entries
+ // to relocate any emitted relocation entries.
- // relocate pending relocation entries
+ // Relocate pending relocation entries.
for (int i = 0; i < num_prinfo_; i++) {
RelocInfo& rinfo = prinfo_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -1716,16 +1742,16 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
- // Adjust code for new modes
+ // Adjust code for new modes.
ASSERT(RelocInfo::IsJSReturn(rmode)
|| RelocInfo::IsComment(rmode)
|| RelocInfo::IsPosition(rmode));
- // these modes do not need an entry in the constant pool
+ // These modes do not need an entry in the constant pool.
} else {
ASSERT(num_prinfo_ < kMaxNumPRInfo);
prinfo_[num_prinfo_++] = rinfo;
// Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info
+ // instruction for which we just recorded relocation info.
BlockConstPoolBefore(pc_offset() + kInstrSize);
}
if (rinfo.rmode() != RelocInfo::NONE) {
@@ -1752,7 +1778,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// blocked for a specific range.
next_buffer_check_ = pc_offset() + kCheckConstInterval;
- // There is nothing to do if there are no pending relocation info entries
+ // There is nothing to do if there are no pending relocation info entries.
if (num_prinfo_ == 0) return;
// We emit a constant pool at regular intervals of about kDistBetweenPools
@@ -1778,10 +1804,11 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// no_const_pool_before_, which is checked here. Also, recursive calls to
// CheckConstPool are blocked by no_const_pool_before_.
if (pc_offset() < no_const_pool_before_) {
- // Emission is currently blocked; make sure we try again as soon as possible
+ // Emission is currently blocked; make sure we try again as soon as
+ // possible.
next_buffer_check_ = no_const_pool_before_;
- // Something is wrong if emission is forced and blocked at the same time
+ // Something is wrong if emission is forced and blocked at the same time.
ASSERT(!force_emit);
return;
}
@@ -1795,23 +1822,23 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
- // Block recursive calls to CheckConstPool
+ // Block recursive calls to CheckConstPool.
BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
num_prinfo_*kInstrSize);
// Don't bother to check for the emit calls below.
next_buffer_check_ = no_const_pool_before_;
- // Emit jump over constant pool if necessary
+ // Emit jump over constant pool if necessary.
Label after_pool;
if (require_jump) b(&after_pool);
RecordComment("[ Constant Pool");
- // Put down constant pool marker
- // "Undefined instruction" as specified by A3.1 Instruction set encoding
+ // Put down constant pool marker "Undefined instruction" as specified by
+ // A3.1 Instruction set encoding.
emit(0x03000000 | num_prinfo_);
- // Emit constant pool entries
+ // Emit constant pool entries.
for (int i = 0; i < num_prinfo_; i++) {
RelocInfo& rinfo = prinfo_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -1819,8 +1846,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
Instr instr = instr_at(rinfo.pc());
- // Instruction to patch must be a ldr/str [pc, #offset]
- // P and U set, B and W clear, Rn == pc, offset12 still 0
+ // Instruction to patch must be a ldr/str [pc, #offset].
+ // P and U set, B and W clear, Rn == pc, offset12 still 0.
ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
(2*B25 | P | U | pc.code()*B16));
int delta = pc_ - rinfo.pc() - 8;
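
Note on the pool marker emitted above: 0x03000000 | num_prinfo_ lies in the ARM undefined-instruction space, so the marker both traps if ever executed and tells a reader of the code stream how many pool entries follow. A standalone sketch of decoding such a marker; the mask width below is an assumption sized for a small entry count, not a constant from the V8 tree:

#include <cstdint>
#include <cstdio>

// Assumed split: high bits identify the marker, low bits carry the count.
const uint32_t kConstPoolMarkerMask = 0xffffff00;  // hypothetical width
const uint32_t kConstPoolMarker = 0x03000000;

bool IsConstPoolMarker(uint32_t instr) {
  return (instr & kConstPoolMarkerMask) == kConstPoolMarker;
}

uint32_t ConstPoolEntryCount(uint32_t instr) {
  return instr & ~kConstPoolMarkerMask;  // low bits hold num_prinfo_
}

int main() {
  uint32_t marker = 0x03000000 | 7;  // as emitted for a pool of 7 entries
  if (IsConstPoolMarker(marker)) {
    std::printf("constant pool with %u entries\n", ConstPoolEntryCount(marker));
  }
  return 0;
}
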
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 208d583ce..f6b7a06aa 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -80,7 +80,7 @@ struct Register {
return 1 << code_;
}
- // (unfortunately we can't make this private in a struct)
+ // Unfortunately we can't make this private in a struct.
int code_;
};
@@ -205,7 +205,7 @@ struct CRegister {
return 1 << code_;
}
- // (unfortunately we can't make this private in a struct)
+ // Unfortunately we can't make this private in a struct.
int code_;
};
@@ -250,7 +250,7 @@ enum Coprocessor {
};
-// Condition field in instructions
+// Condition field in instructions.
enum Condition {
eq = 0 << 28, // Z set equal.
ne = 1 << 28, // Z clear not equal.
@@ -628,6 +628,9 @@ class Assembler : public Malloced {
void blx(Label* L) { blx(branch_offset(L, false)); } // v5 and above
// Data-processing instructions
+ void ubfx(Register dst, Register src1, const Operand& src2,
+ const Operand& src3, Condition cond = al);
+
void and_(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
diff --git a/deps/v8/src/arm/assembler-thumb2-inl.h b/deps/v8/src/arm/assembler-thumb2-inl.h
index 3808ef00f..9e0fc2f73 100644
--- a/deps/v8/src/arm/assembler-thumb2-inl.h
+++ b/deps/v8/src/arm/assembler-thumb2-inl.h
@@ -174,20 +174,6 @@ Operand::Operand(const ExternalReference& f) {
}
-Operand::Operand(Object** opp) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(opp);
- rmode_ = RelocInfo::NONE;
-}
-
-
-Operand::Operand(Context** cpp) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(cpp);
- rmode_ = RelocInfo::NONE;
-}
-
-
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
@@ -229,14 +215,24 @@ void Assembler::emit(Instr x) {
Address Assembler::target_address_address_at(Address pc) {
- Instr instr = Memory::int32_at(pc);
- // Verify that the instruction at pc is a ldr<cond> <Rd>, [pc +/- offset_12].
+ Address target_pc = pc;
+ Instr instr = Memory::int32_at(target_pc);
+ // If we have a bx instruction, the instruction before the bx is
+ // what we need to patch.
+ static const int32_t kBxInstMask = 0x0ffffff0;
+ static const int32_t kBxInstPattern = 0x012fff10;
+ if ((instr & kBxInstMask) == kBxInstPattern) {
+ target_pc -= kInstrSize;
+ instr = Memory::int32_at(target_pc);
+ }
+ // Verify that the instruction to patch is a
+ // ldr<cond> <Rd>, [pc +/- offset_12].
ASSERT((instr & 0x0f7f0000) == 0x051f0000);
int offset = instr & 0xfff; // offset_12 is unsigned
if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
// Verify that the constant pool comes after the instruction referencing it.
ASSERT(offset >= -4);
- return pc + offset + 8;
+ return target_pc + offset + 8;
}
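
The mask/pattern pair introduced above matches any bx <Rm>, regardless of condition and target register, by stripping the cond (31-28) and Rm (3-0) fields. A self-contained check against two known encodings; the instruction words are taken from the ARM reference manual, and this is an illustration rather than a test from the tree:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kBxInstMask = 0x0ffffff0;     // strip cond and Rm
  const uint32_t kBxInstPattern = 0x012fff10;  // bx <Rm>
  uint32_t bx_lr = 0xE12FFF1E;   // bx lr (cond = al, Rm = 14)
  uint32_t mov_r0 = 0xE1A00000;  // mov r0, r0 (a nop), not a bx
  assert((bx_lr & kBxInstMask) == kBxInstPattern);
  assert((mov_r0 & kBxInstMask) != kBxInstPattern);
  return 0;
}
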
diff --git a/deps/v8/src/arm/assembler-thumb2.cc b/deps/v8/src/arm/assembler-thumb2.cc
index 6c2b9032f..e31c42917 100644
--- a/deps/v8/src/arm/assembler-thumb2.cc
+++ b/deps/v8/src/arm/assembler-thumb2.cc
@@ -30,9 +30,9 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -51,9 +51,14 @@ void CpuFeatures::Probe() {
// If the compiler is allowed to use vfp then we can use vfp too in our
// code generation.
#if !defined(__arm__)
- // For the simulator=arm build, always use VFP since the arm simulator has
- // VFP support.
- supported_ |= 1u << VFP3;
+ // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
+ if (FLAG_enable_vfp3) {
+ supported_ |= 1u << VFP3;
+ }
+ // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
+ if (FLAG_enable_armv7) {
+ supported_ |= 1u << ARMv7;
+ }
#else
if (Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
@@ -66,6 +71,11 @@ void CpuFeatures::Probe() {
supported_ |= 1u << VFP3;
found_by_runtime_probing_ |= 1u << VFP3;
}
+
+ if (OS::ArmCpuHasFeature(ARMv7)) {
+ supported_ |= 1u << ARMv7;
+ found_by_runtime_probing_ |= 1u << ARMv7;
+ }
#endif
}
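
The probe above records each detected feature as a single bit in supported_, so a later CpuFeatures::IsEnabled check reduces to one AND. A stripped-down model of that bookkeeping; the enum positions are assumed for the sketch and are not the real CpuFeature values:

#include <cstdio>

enum CpuFeature { VFP3 = 0, ARMv7 = 1 };  // positions assumed

static unsigned supported_ = 0;

static bool IsSupported(CpuFeature f) {
  return (supported_ & (1u << f)) != 0;
}

int main() {
  supported_ |= 1u << VFP3;   // as the flag or runtime probe would do
  supported_ |= 1u << ARMv7;
  std::printf("VFP3: %d, ARMv7: %d\n", IsSupported(VFP3), IsSupported(ARMv7));
  return 0;
}
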
@@ -83,9 +93,9 @@ Register r4 = { 4 };
Register r5 = { 5 };
Register r6 = { 6 };
Register r7 = { 7 };
-Register r8 = { 8 };
+Register r8 = { 8 }; // Used as context register.
Register r9 = { 9 };
-Register r10 = { 10 };
+Register r10 = { 10 }; // Used as roots register.
Register fp = { 11 };
Register ip = { 12 };
Register sp = { 13 };
@@ -264,9 +274,9 @@ MemOperand::MemOperand(Register rn, Register rm,
// -----------------------------------------------------------------------------
-// Implementation of Assembler
+// Implementation of Assembler.
-// Instruction encoding bits
+// Instruction encoding bits.
enum {
H = 1 << 5, // halfword (or byte)
S6 = 1 << 6, // signed (or unsigned)
@@ -299,14 +309,14 @@ enum {
B26 = 1 << 26,
B27 = 1 << 27,
- // Instruction bit masks
+ // Instruction bit masks.
RdMask = 15 << 12, // in str instruction
CondMask = 15 << 28,
CoprocessorMask = 15 << 8,
OpCodeMask = 15 << 21, // in data-processing instructions
Imm24Mask = (1 << 24) - 1,
Off12Mask = (1 << 12) - 1,
- // Reserved condition
+ // Reserved condition.
nv = 15 << 28
};
@@ -327,13 +337,13 @@ const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
// ldr pc, [pc, #XXX]
const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
-// spare_buffer_
+// Spare buffer.
static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;
Assembler::Assembler(void* buffer, int buffer_size) {
if (buffer == NULL) {
- // do our own buffer management
+ // Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
@@ -351,14 +361,14 @@ Assembler::Assembler(void* buffer, int buffer_size) {
own_buffer_ = true;
} else {
- // use externally provided buffer instead
+ // Use externally provided buffer instead.
ASSERT(buffer_size > 0);
buffer_ = static_cast<byte*>(buffer);
buffer_size_ = buffer_size;
own_buffer_ = false;
}
- // setup buffer pointers
+ // Setup buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -386,11 +396,11 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
- // emit constant pool if necessary
+ // Emit constant pool if necessary.
CheckConstPool(true, false);
ASSERT(num_prinfo_ == 0);
- // setup desc
+ // Setup code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
@@ -539,7 +549,7 @@ void Assembler::bind_to(Label* L, int pos) {
void Assembler::link_to(Label* L, Label* appendix) {
if (appendix->is_linked()) {
if (L->is_linked()) {
- // append appendix to L's list
+ // Append appendix to L's list.
int fixup_pos;
int link = L->pos();
do {
@@ -549,7 +559,7 @@ void Assembler::link_to(Label* L, Label* appendix) {
ASSERT(link == kEndOfChain);
target_at_put(fixup_pos, appendix->pos());
} else {
- // L is empty, simply use appendix
+ // L is empty, simply use appendix.
*L = *appendix;
}
}
@@ -575,12 +585,12 @@ void Assembler::next(Label* L) {
}
-// Low-level code emission routines depending on the addressing mode
+// Low-level code emission routines depending on the addressing mode.
static bool fits_shifter(uint32_t imm32,
uint32_t* rotate_imm,
uint32_t* immed_8,
Instr* instr) {
- // imm32 must be unsigned
+ // imm32 must be unsigned.
for (int rot = 0; rot < 16; rot++) {
uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
if ((imm8 <= 0xff)) {
@@ -589,7 +599,7 @@ static bool fits_shifter(uint32_t imm32,
return true;
}
}
- // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
+ // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= 0x2*B21;
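
The loop in fits_shifter above is the standard ARM operand2 test: an immediate is encodable exactly when it is an 8-bit value rotated right by an even amount. A self-contained version for experimenting outside the assembler; the rot == 0 branch is added here to avoid the shift-by-32 that the in-tree loop leaves to the compiler:

#include <cstdint>
#include <cstdio>

// True if imm32 fits an ARM shifter operand:
// immed_8 rotated right by 2*rotate_imm.
bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8) {
  for (uint32_t rot = 0; rot < 16; rot++) {
    // Rotating left by 2*rot undoes a right rotation by 2*rot.
    uint32_t imm8 = (rot == 0)
        ? imm32
        : (imm32 << (2 * rot)) | (imm32 >> (32 - 2 * rot));
    if (imm8 <= 0xff) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  return false;
}

int main() {
  uint32_t rot, imm8;
  std::printf("0xff000000 fits: %d\n",
              FitsShifter(0xff000000u, &rot, &imm8));  // 1 (0xff ror 8)
  std::printf("0x00000101 fits: %d\n",
              FitsShifter(0x00000101u, &rot, &imm8));  // 0 (spans 9 bits)
  return 0;
}
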
@@ -626,7 +636,7 @@ void Assembler::addrmod1(Instr instr,
CheckBuffer();
ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
if (!x.rm_.is_valid()) {
- // immediate
+ // Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
if (MustUseIp(x.rmode_) ||
@@ -634,7 +644,7 @@ void Assembler::addrmod1(Instr instr,
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
// However, if the original instruction is a 'mov rd, x' (not setting the
- // condition code), then replace it with a 'ldr rd, [pc]'
+ // condition code), then replace it with a 'ldr rd, [pc]'.
RecordRelocInfo(x.rmode_, x.imm32_);
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = static_cast<Condition>(instr & CondMask);
@@ -648,16 +658,16 @@ void Assembler::addrmod1(Instr instr,
}
instr |= I | rotate_imm*B8 | immed_8;
} else if (!x.rs_.is_valid()) {
- // immediate shift
+ // Immediate shift.
instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
} else {
- // register shift
+ // Register shift.
ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
}
emit(instr | rn.code()*B16 | rd.code()*B12);
if (rn.is(pc) || x.rm_.is(pc))
- // block constant pool emission for one instruction after reading pc
+ // Block constant pool emission for one instruction after reading pc.
BlockConstPoolBefore(pc_offset() + kInstrSize);
}
@@ -666,15 +676,15 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
ASSERT((instr & ~(CondMask | B | L)) == B26);
int am = x.am_;
if (!x.rm_.is_valid()) {
- // immediate offset
+ // Immediate offset.
int offset_12 = x.offset_;
if (offset_12 < 0) {
offset_12 = -offset_12;
am ^= U;
}
if (!is_uint12(offset_12)) {
- // immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed
+ // Immediate offset cannot be encoded; load it first to register ip.
+ // rn (and rd in a load) should never be ip, or it will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC,
static_cast<Condition>(instr & CondMask));
@@ -684,9 +694,9 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
ASSERT(offset_12 >= 0); // no masking needed
instr |= offset_12;
} else {
- // register offset (shift_imm_ and shift_op_ are 0) or scaled
+ // Register offset (shift_imm_ and shift_op_ are 0) or scaled
// register offset; the constructors make sure that both shift_imm_
- // and shift_op_ are initialized
+ // and shift_op_ are initialized.
ASSERT(!x.rm_.is(pc));
instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
}
@@ -700,15 +710,15 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
ASSERT(x.rn_.is_valid());
int am = x.am_;
if (!x.rm_.is_valid()) {
- // immediate offset
+ // Immediate offset.
int offset_8 = x.offset_;
if (offset_8 < 0) {
offset_8 = -offset_8;
am ^= U;
}
if (!is_uint8(offset_8)) {
- // immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed
+ // Immediate offset cannot be encoded; load it first to register ip.
+ // rn (and rd in a load) should never be ip, or it will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC,
static_cast<Condition>(instr & CondMask));
@@ -718,15 +728,15 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
ASSERT(offset_8 >= 0); // no masking needed
instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
} else if (x.shift_imm_ != 0) {
- // scaled register offset not supported, load index first
- // rn (and rd in a load) should never be ip, or will be trashed
+ // Scaled register offset not supported; load index first.
+ // rn (and rd in a load) should never be ip, or it will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
static_cast<Condition>(instr & CondMask));
addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
return;
} else {
- // register offset
+ // Register offset.
ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
instr |= x.rm_.code();
}
@@ -744,7 +754,7 @@ void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
- // unindexed addressing is not encoded by this function
+ // Unindexed addressing is not encoded by this function.
ASSERT_EQ((B27 | B26),
(instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
@@ -759,7 +769,7 @@ void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
- // post-indexed addressing requires W == 1; different than in addrmod2/3
+ // Post-indexed addressing requires W == 1; different than in addrmod2/3.
if ((am & P) == 0)
am |= W;
@@ -782,7 +792,7 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
}
// Block the emission of the constant pool, since the branch instruction must
- // be emitted at the pc offset recorded by the label
+ // be emitted at the pc offset recorded by the label.
BlockConstPoolBefore(pc_offset() + kInstrSize);
return target_pos - (pc_offset() + kPcLoadDelta);
}
@@ -804,7 +814,7 @@ void Assembler::label_at_put(Label* L, int at_offset) {
}
-// Branch instructions
+// Branch instructions.
void Assembler::b(int branch_offset, Condition cond) {
ASSERT((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2;
@@ -812,7 +822,7 @@ void Assembler::b(int branch_offset, Condition cond) {
emit(cond | B27 | B25 | (imm24 & Imm24Mask));
if (cond == al)
- // dead code is a good location to emit the constant pool
+ // Dead code is a good location to emit the constant pool.
CheckConstPool(false, false);
}
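
For reference, b above packs a word-aligned offset into the low 24 bits: the offset is divided by 4 and masked by Imm24Mask under cond | B27 | B25. A quick standalone check that a zero offset with the al condition yields the canonical 0xea000000 word:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t al = 0xE0000000;  // 'always' condition in bits 31-28
  const uint32_t B25 = 1 << 25, B27 = 1 << 27;
  const uint32_t Imm24Mask = (1 << 24) - 1;
  int32_t branch_offset = 0;         // target is pc + 8 on ARM
  assert((branch_offset & 3) == 0);  // offsets must be word-aligned
  uint32_t instr = al | B27 | B25 | ((branch_offset >> 2) & Imm24Mask);
  assert(instr == 0xEA000000);
  return 0;
}
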
@@ -849,7 +859,22 @@ void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
}
-// Data-processing instructions
+// Data-processing instructions.
+
+// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
+// Instruction details available in ARM DDI 0406A, A8-464.
+// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
+// Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
+void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
+ const Operand& src3, Condition cond) {
+ ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
+ ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
+ ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
+ emit(cond | 0x3F*B21 | src3.imm32_*B16 |
+ dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
+}
+
+
void Assembler::and_(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 0*B21 | s, src1, dst, src2);
@@ -886,7 +911,7 @@ void Assembler::add(Register dst, Register src1, const Operand& src2,
if (FLAG_push_pop_elimination &&
last_bound_pos_ <= (pc_offset() - pattern_size) &&
reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
- // pattern
+ // Pattern.
instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
(instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
pc_ -= 2 * kInstrSize;
@@ -960,7 +985,7 @@ void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
}
-// Multiply instructions
+// Multiply instructions.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
SBit s, Condition cond) {
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
@@ -1029,7 +1054,7 @@ void Assembler::umull(Register dstL,
}
-// Miscellaneous arithmetic instructions
+// Miscellaneous arithmetic instructions.
void Assembler::clz(Register dst, Register src, Condition cond) {
// v5 and above.
ASSERT(!dst.is(pc) && !src.is(pc));
@@ -1038,7 +1063,7 @@ void Assembler::clz(Register dst, Register src, Condition cond) {
}
-// Status register access instructions
+// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
ASSERT(!dst.is(pc));
emit(cond | B24 | s | 15*B16 | dst.code()*B12);
@@ -1050,12 +1075,12 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
ASSERT(fields >= B16 && fields < B20); // at least one field set
Instr instr;
if (!src.rm_.is_valid()) {
- // immediate
+ // Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
if (MustUseIp(src.rmode_) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
- // immediate operand cannot be encoded, load it first to register ip
+ // Immediate operand cannot be encoded, load it first to register ip.
RecordRelocInfo(src.rmode_, src.imm32_);
ldr(ip, MemOperand(pc, 0), cond);
msr(fields, Operand(ip), cond);
@@ -1070,7 +1095,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
}
-// Load/Store instructions
+// Load/Store instructions.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
if (dst.is(pc)) {
WriteRecordedPositions();
@@ -1085,7 +1110,7 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
if (FLAG_push_pop_elimination &&
last_bound_pos_ <= (pc_offset() - pattern_size) &&
reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
- // pattern
+ // Pattern.
instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
pc_ -= 2 * kInstrSize;
@@ -1106,6 +1131,7 @@ void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
if (FLAG_push_pop_elimination &&
last_bound_pos_ <= (pc_offset() - pattern_size) &&
reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+ // Pattern.
instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
pc_ -= 2 * kInstrSize;
@@ -1147,17 +1173,17 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
}
-// Load/Store multiple instructions
+// Load/Store multiple instructions.
void Assembler::ldm(BlockAddrMode am,
Register base,
RegList dst,
Condition cond) {
- // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable
+ // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
addrmod4(cond | B27 | am | L, base, dst);
- // emit the constant pool after a function return implemented by ldm ..{..pc}
+ // Emit the constant pool after a function return implemented by ldm ..{..pc}.
if (cond == al && (dst & pc.bit()) != 0) {
// There is a slight chance that the ldm instruction was actually a call,
// in which case it would be wrong to return into the constant pool; we
@@ -1177,7 +1203,7 @@ void Assembler::stm(BlockAddrMode am,
}
-// Semaphore instructions
+// Semaphore instructions.
void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
ASSERT(!dst.is(base) && !src.is(base));
@@ -1197,7 +1223,7 @@ void Assembler::swpb(Register dst,
}
-// Exception-generating instructions and debugging support
+// Exception-generating instructions and debugging support.
void Assembler::stop(const char* msg) {
#if !defined(__arm__)
// The simulator handles these special instructions and stops execution.
@@ -1222,7 +1248,7 @@ void Assembler::swi(uint32_t imm24, Condition cond) {
}
-// Coprocessor instructions
+// Coprocessor instructions.
void Assembler::cdp(Coprocessor coproc,
int opcode_1,
CRegister crd,
@@ -1307,7 +1333,7 @@ void Assembler::ldc(Coprocessor coproc,
int option,
LFlag l,
Condition cond) {
- // unindexed addressing
+ // Unindexed addressing.
ASSERT(is_uint8(option));
emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
coproc*B8 | (option & 255));
@@ -1346,7 +1372,7 @@ void Assembler::stc(Coprocessor coproc,
int option,
LFlag l,
Condition cond) {
- // unindexed addressing
+ // Unindexed addressing.
ASSERT(is_uint8(option));
emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
coproc*B8 | (option & 255));
@@ -1371,6 +1397,36 @@ void Assembler::stc2(Coprocessor coproc,
// Support for VFP.
+void Assembler::vldr(const DwVfpRegister dst,
+ const Register base,
+ int offset,
+ const Condition cond) {
+ // Ddst = MEM(Rbase + offset).
+ // Instruction details available in ARM DDI 0406A, A8-628.
+ // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
+ // Vdst(15-12) | 1011(11-8) | offset
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(offset % 4 == 0);
+ emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
+ 0xB*B8 | ((offset / 4) & 255));
+}
+
+
+void Assembler::vstr(const DwVfpRegister src,
+ const Register base,
+ int offset,
+ const Condition cond) {
+ // MEM(Rbase + offset) = Dsrc.
+ // Instruction details available in ARM DDI 0406A, A8-786.
+ // cond(31-28) | 1101(27-24)| 1000(23-20) | Rbase(19-16) |
+ // Vsrc(15-12) | 1011(11-8) | (offset/4)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(offset % 4 == 0);
+ emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
+ 0xB*B8 | ((offset / 4) & 255));
+}
+
+
void Assembler::vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
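
The vldr/vstr emits above can be cross-checked the same way; the byte offset is stored divided by 4, so only word-aligned offsets are representable. For vldr d0, [r1, #8] the expected word is 0xed910b02. A sketch against ARM DDI 0406A, A8-628, not an in-tree test:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t al = 0xE0000000;
  const uint32_t B8 = 1 << 8, B12 = 1 << 12, B16 = 1 << 16, B20 = 1 << 20;
  uint32_t base = 1, dst = 0;  // vldr d0, [r1, #8]
  int offset = 8;
  assert(offset % 4 == 0);     // encoded in words, as asserted in vldr above
  // Mirrors: cond | 0xD9*B20 | base*B16 | dst*B12 | 0xB*B8 | (offset/4 & 255).
  uint32_t instr = al | 0xD9 * B20 | base * B16 | dst * B12 | 0xB * B8 |
                   ((offset / 4) & 255);
  assert(instr == 0xED910B02);
  return 0;
}
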
@@ -1434,7 +1490,7 @@ void Assembler::vcvt(const DwVfpRegister dst,
const Condition cond) {
// Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
// Instruction details available in ARM DDI 0406A, A8-576.
- // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
+ // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
@@ -1541,14 +1597,14 @@ void Assembler::vmrs(Register dst, Condition cond) {
}
-// Pseudo instructions
+// Pseudo instructions.
void Assembler::lea(Register dst,
const MemOperand& x,
SBit s,
Condition cond) {
int am = x.am_;
if (!x.rm_.is_valid()) {
- // immediate offset
+ // Immediate offset.
if ((am & P) == 0) // post indexing
mov(dst, Operand(x.rn_), s, cond);
else if ((am & U) == 0) // negative indexing
@@ -1582,7 +1638,7 @@ void Assembler::BlockConstPoolFor(int instructions) {
}
-// Debugging
+// Debugging.
void Assembler::RecordJSReturn() {
WriteRecordedPositions();
CheckBuffer();
@@ -1635,7 +1691,7 @@ void Assembler::WriteRecordedPositions() {
void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
- // compute new buffer size
+ // Compute new buffer size.
CodeDesc desc; // the new buffer
if (buffer_size_ < 4*KB) {
desc.buffer_size = 4*KB;
@@ -1646,20 +1702,20 @@ void Assembler::GrowBuffer() {
}
CHECK_GT(desc.buffer_size, 0); // no overflow
- // setup new buffer
+ // Setup new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
- // copy the data
+ // Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
memmove(desc.buffer, buffer_, desc.instr_size);
memmove(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.pos(), desc.reloc_size);
- // switch buffers
+ // Switch buffers.
DeleteArray(buffer_);
buffer_ = desc.buffer;
buffer_size_ = desc.buffer_size;
@@ -1667,11 +1723,11 @@ void Assembler::GrowBuffer() {
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
- // none of our relocation types are pc relative pointing outside the code
+ // None of our relocation types are pc relative pointing outside the code
// buffer nor pc absolute pointing inside the code buffer, so there is no need
- // to relocate any emitted relocation entries
+ // to relocate any emitted relocation entries.
- // relocate pending relocation entries
+ // Relocate pending relocation entries.
for (int i = 0; i < num_prinfo_; i++) {
RelocInfo& rinfo = prinfo_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -1686,16 +1742,16 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
- // Adjust code for new modes
+ // Adjust code for new modes.
ASSERT(RelocInfo::IsJSReturn(rmode)
|| RelocInfo::IsComment(rmode)
|| RelocInfo::IsPosition(rmode));
- // these modes do not need an entry in the constant pool
+ // These modes do not need an entry in the constant pool.
} else {
ASSERT(num_prinfo_ < kMaxNumPRInfo);
prinfo_[num_prinfo_++] = rinfo;
// Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info
+ // instruction for which we just recorded relocation info.
BlockConstPoolBefore(pc_offset() + kInstrSize);
}
if (rinfo.rmode() != RelocInfo::NONE) {
@@ -1722,7 +1778,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// blocked for a specific range.
next_buffer_check_ = pc_offset() + kCheckConstInterval;
- // There is nothing to do if there are no pending relocation info entries
+ // There is nothing to do if there are no pending relocation info entries.
if (num_prinfo_ == 0) return;
// We emit a constant pool at regular intervals of about kDistBetweenPools
@@ -1748,10 +1804,11 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// no_const_pool_before_, which is checked here. Also, recursive calls to
// CheckConstPool are blocked by no_const_pool_before_.
if (pc_offset() < no_const_pool_before_) {
- // Emission is currently blocked; make sure we try again as soon as possible
+ // Emission is currently blocked; make sure we try again as soon as
+ // possible.
next_buffer_check_ = no_const_pool_before_;
- // Something is wrong if emission is forced and blocked at the same time
+ // Something is wrong if emission is forced and blocked at the same time.
ASSERT(!force_emit);
return;
}
@@ -1765,23 +1822,23 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
- // Block recursive calls to CheckConstPool
+ // Block recursive calls to CheckConstPool.
BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
num_prinfo_*kInstrSize);
// Don't bother to check for the emit calls below.
next_buffer_check_ = no_const_pool_before_;
- // Emit jump over constant pool if necessary
+ // Emit jump over constant pool if necessary.
Label after_pool;
if (require_jump) b(&after_pool);
RecordComment("[ Constant Pool");
- // Put down constant pool marker
- // "Undefined instruction" as specified by A3.1 Instruction set encoding
+ // Put down constant pool marker "Undefined instruction" as specified by
+ // A3.1 Instruction set encoding.
emit(0x03000000 | num_prinfo_);
- // Emit constant pool entries
+ // Emit constant pool entries.
for (int i = 0; i < num_prinfo_; i++) {
RelocInfo& rinfo = prinfo_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -1789,8 +1846,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
Instr instr = instr_at(rinfo.pc());
- // Instruction to patch must be a ldr/str [pc, #offset]
- // P and U set, B and W clear, Rn == pc, offset12 still 0
+ // Instruction to patch must be a ldr/str [pc, #offset].
+ // P and U set, B and W clear, Rn == pc, offset12 still 0.
ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
(2*B25 | P | U | pc.code()*B16));
int delta = pc_ - rinfo.pc() - 8;
diff --git a/deps/v8/src/arm/assembler-thumb2.h b/deps/v8/src/arm/assembler-thumb2.h
index 31e948726..869ac4619 100644
--- a/deps/v8/src/arm/assembler-thumb2.h
+++ b/deps/v8/src/arm/assembler-thumb2.h
@@ -30,9 +30,9 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
// A light-weight ARM Assembler
// Generates user mode instructions for the ARM architecture up to version 5
@@ -80,7 +80,7 @@ struct Register {
return 1 << code_;
}
- // (unfortunately we can't make this private in a struct)
+ // Unfortunately we can't make this private in a struct.
int code_;
};
@@ -205,7 +205,7 @@ struct CRegister {
return 1 << code_;
}
- // (unfortunately we can't make this private in a struct)
+ // Unfortunately we can't make this private in a struct.
int code_;
};
@@ -250,7 +250,7 @@ enum Coprocessor {
};
-// Condition field in instructions
+// Condition field in instructions.
enum Condition {
eq = 0 << 28, // Z set equal.
ne = 1 << 28, // Z clear not equal.
@@ -398,8 +398,6 @@ class Operand BASE_EMBEDDED {
RelocInfo::Mode rmode = RelocInfo::NONE));
INLINE(explicit Operand(const ExternalReference& f));
INLINE(explicit Operand(const char* s));
- INLINE(explicit Operand(Object** opp));
- INLINE(explicit Operand(Context** cpp));
explicit Operand(Handle<Object> handle);
INLINE(explicit Operand(Smi* value));
@@ -630,6 +628,9 @@ class Assembler : public Malloced {
void blx(Label* L) { blx(branch_offset(L, false)); } // v5 and above
// Data-processing instructions
+ void ubfx(Register dst, Register src1, const Operand& src2,
+ const Operand& src3, Condition cond = al);
+
void and_(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
@@ -796,6 +797,14 @@ class Assembler : public Malloced {
// However, some simple modifications can allow
// these APIs to support D16 to D31.
+ void vldr(const DwVfpRegister dst,
+ const Register base,
+ int offset, // Offset must be a multiple of 4.
+ const Condition cond = al);
+ void vstr(const DwVfpRegister src,
+ const Register base,
+ int offset, // Offset must be a multiple of 4.
+ const Condition cond = al);
void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index ae7dae3b0..edb1b0ae7 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -499,7 +499,10 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
__ bind(&non_function_call);
-
+ // CALL_NON_FUNCTION expects the non-function constructor as receiver
+ // (instead of the original receiver from the call site). The receiver is
+ // stack element argc.
+ __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand(0));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
@@ -904,7 +907,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
- // r0: actual number of argument
+ // r0: actual number of arguments
{ Label done;
__ tst(r0, Operand(r0));
__ b(ne, &done);
@@ -914,40 +917,31 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ bind(&done);
}
- // 2. Get the function to call from the stack.
- // r0: actual number of argument
- { Label done, non_function, function;
- __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &non_function);
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(eq, &function);
-
- // Non-function called: Clear the function to force exception.
- __ bind(&non_function);
- __ mov(r1, Operand(0));
- __ b(&done);
-
- // Change the context eagerly because it will be used below to get the
- // right global object.
- __ bind(&function);
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- __ bind(&done);
- }
+ // 2. Get the function to call (passed as receiver) from the stack, check
+ // if it is a function.
+ // r0: actual number of arguments
+ Label non_function;
+ __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &non_function);
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+ __ b(ne, &non_function);
- // 3. Make sure first argument is an object; convert if necessary.
+ // 3a. Patch the first argument if necessary when calling a function.
// r0: actual number of arguments
// r1: function
- { Label call_to_object, use_global_receiver, patch_receiver, done;
+ Label shift_arguments;
+ { Label convert_to_object, use_global_receiver, patch_receiver;
+ // Change context eagerly in case we need the global receiver.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
__ ldr(r2, MemOperand(r2, -kPointerSize));
-
// r0: actual number of arguments
// r1: function
// r2: first argument
__ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &call_to_object);
+ __ b(eq, &convert_to_object);
__ LoadRoot(r3, Heap::kNullValueRootIndex);
__ cmp(r2, r3);
@@ -957,31 +951,28 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ b(eq, &use_global_receiver);
__ CompareObjectType(r2, r3, r3, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &call_to_object);
+ __ b(lt, &convert_to_object);
__ cmp(r3, Operand(LAST_JS_OBJECT_TYPE));
- __ b(le, &done);
-
- __ bind(&call_to_object);
- __ EnterInternalFrame();
+ __ b(le, &shift_arguments);
- // Store number of arguments and function across the call into the runtime.
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ bind(&convert_to_object);
+ __ EnterInternalFrame(); // In order to preserve argument count.
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged.
__ push(r0);
- __ push(r1);
__ push(r2);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
__ mov(r2, r0);
- // Restore number of arguments and function.
- __ pop(r1);
__ pop(r0);
__ mov(r0, Operand(r0, ASR, kSmiTagSize));
-
__ LeaveInternalFrame();
- __ b(&patch_receiver);
+ // Restore the function to r1.
+ __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ jmp(&patch_receiver);
- // Use the global receiver object from the called function as the receiver.
+ // Use the global receiver object from the called function as the
+ // receiver.
__ bind(&use_global_receiver);
const int kGlobalIndex =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
@@ -994,16 +985,30 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ add(r3, sp, Operand(r0, LSL, kPointerSizeLog2));
__ str(r2, MemOperand(r3, -kPointerSize));
- __ bind(&done);
+ __ jmp(&shift_arguments);
}
- // 4. Shift stuff one slot down the stack
- // r0: actual number of arguments (including call() receiver)
+ // 3b. Patch the first argument when calling a non-function. The
+ // CALL_NON_FUNCTION builtin expects the non-function callee as
+ // receiver, so overwrite the first argument which will ultimately
+ // become the receiver.
+ // r0: actual number of arguments
+ // r1: function
+ __ bind(&non_function);
+ __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ str(r1, MemOperand(r2, -kPointerSize));
+ // Clear r1 to indicate a non-function being called.
+ __ mov(r1, Operand(0));
+
+ // 4. Shift arguments and return address one slot down on the stack
+ // (overwriting the original receiver). Adjust argument count to make
+ // the original first argument the new receiver.
+ // r0: actual number of arguments
// r1: function
+ __ bind(&shift_arguments);
{ Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ add(r2, r2, Operand(kPointerSize)); // copy receiver too
__ bind(&loop);
__ ldr(ip, MemOperand(r2, -kPointerSize));
@@ -1011,43 +1016,41 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ sub(r2, r2, Operand(kPointerSize));
__ cmp(r2, sp);
__ b(ne, &loop);
+ // Adjust the actual number of arguments and remove the top element
+ // (which is a copy of the last argument).
+ __ sub(r0, r0, Operand(1));
+ __ pop();
}
- // 5. Adjust the actual number of arguments and remove the top element.
- // r0: actual number of arguments (including call() receiver)
- // r1: function
- __ sub(r0, r0, Operand(1));
- __ add(sp, sp, Operand(kPointerSize));
-
- // 6. Get the code for the function or the non-function builtin.
- // If number of expected arguments matches, then call. Otherwise restart
- // the arguments adaptor stub.
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
// r0: actual number of arguments
// r1: function
- { Label invoke;
+ { Label function;
__ tst(r1, r1);
- __ b(ne, &invoke);
+ __ b(ne, &function);
__ mov(r2, Operand(0)); // expected number of arguments is 0 for CALL_NON_FUNCTION
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
+ __ bind(&function);
+ }
- __ bind(&invoke);
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2,
- FieldMemOperand(r3,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ ldr(r3,
- MemOperand(r3, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
- __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ cmp(r2, r0); // Check formal and actual parameter counts.
- __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET, ne);
+ // 5b. Get the code to call from the function and check that the number of
+ // expected arguments matches what we're providing. If so, jump
+ // (tail-call) to the code in register r3 without checking arguments.
+ // r0: actual number of arguments
+ // r1: function
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2,
+ FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+ __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ cmp(r2, r0); // Check formal and actual parameter counts.
+ __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET, ne);
- // 7. Jump to the code in r3 without checking arguments.
- ParameterCount expected(0);
- __ InvokeCode(r3, expected, expected, JUMP_FUNCTION);
- }
+ ParameterCount expected(0);
+ __ InvokeCode(r3, expected, expected, JUMP_FUNCTION);
}
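
The shift in step 4 above is easiest to follow on a model: the loop copies every stack word one slot toward the receiver slot, starting there and walking down to sp, which overwrites the old receiver and leaves a duplicate of the last argument on top; the sub/pop pair then discards that duplicate. A small array model with made-up values, purely to trace the indices:

#include <cassert>
#include <cstdio>

int main() {
  // Index 0 plays the role of sp; sp[argc] is the receiver slot, which
  // already holds the callee (or the non-function) patched in above.
  int stack[] = {30, 20, 10, 99};  // args 3, 2, 1, then the receiver slot
  int argc = 3;
  int sp = 0;
  // Mirror the ldr/str loop: copy each word one slot upward.
  for (int i = argc; i >= 1; i--) stack[sp + i] = stack[sp + i - 1];
  // Mirror sub(r0, r0, Operand(1)) and pop().
  argc -= 1;
  sp += 1;
  // The original first argument (10) is now the receiver; 20 and 30 remain.
  assert(stack[sp + argc] == 10);
  std::printf("argc = %d, receiver = %d\n", argc, stack[sp + argc]);
  return 0;
}
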
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index ea4b165fd..046d7b95b 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -121,14 +121,10 @@ CodeGenState::~CodeGenState() {
// -------------------------------------------------------------------------
// CodeGenerator implementation
-CodeGenerator::CodeGenerator(MacroAssembler* masm,
- Handle<Script> script,
- bool is_eval)
- : is_eval_(is_eval),
- script_(script),
- deferred_(8),
+CodeGenerator::CodeGenerator(MacroAssembler* masm)
+ : deferred_(8),
masm_(masm),
- scope_(NULL),
+ info_(NULL),
frame_(NULL),
allocator_(NULL),
cc_reg_(al),
@@ -137,23 +133,21 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm,
}
+Scope* CodeGenerator::scope() { return info_->function()->scope(); }
+
+
// Calling conventions:
// fp: caller's frame pointer
// sp: stack pointer
// r1: called JS function
// cp: callee's context
-void CodeGenerator::Generate(FunctionLiteral* fun,
- Mode mode,
- CompilationInfo* info) {
+void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
// Record the position for debugging purposes.
- CodeForFunctionPosition(fun);
-
- ZoneList<Statement*>* body = fun->body();
+ CodeForFunctionPosition(info->function());
// Initialize state.
- ASSERT(scope_ == NULL);
- scope_ = fun->scope();
+ info_ = info;
ASSERT(allocator_ == NULL);
RegisterAllocator register_allocator(this);
allocator_ = &register_allocator;
@@ -174,7 +168,7 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
frame_->SpillAll();
__ stop("stop-at");
}
@@ -189,7 +183,7 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
frame_->AllocateStackSlots();
VirtualFrame::SpilledScope spilled_scope;
- int heap_slots = scope_->num_heap_slots();
+ int heap_slots = scope()->num_heap_slots();
if (heap_slots > 0) {
// Allocate local context.
// Get outer context and create a new context based on it.
@@ -219,7 +213,6 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
// 3) don't copy parameter operand code from SlotOperand!
{
Comment cmnt2(masm_, "[ copy context parameters into .context");
-
// Note that iteration order is relevant here! If we have the same
// parameter twice (e.g., function (x, y, x)), and that parameter
// needs to be copied into the context, it must be the last argument
@@ -228,12 +221,11 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
// order: such a parameter is copied repeatedly into the same
// context location and thus the last value is what is seen inside
// the function.
- for (int i = 0; i < scope_->num_parameters(); i++) {
- Variable* par = scope_->parameter(i);
+ for (int i = 0; i < scope()->num_parameters(); i++) {
+ Variable* par = scope()->parameter(i);
Slot* slot = par->slot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
- // No parameters in global scope.
- ASSERT(!scope_->is_global_scope());
+ ASSERT(!scope()->is_global_scope()); // No params in global scope.
__ ldr(r1, frame_->ParameterAt(i));
// Loads r2 with context; used below in RecordWrite.
__ str(r1, SlotOperand(slot, r2));
@@ -249,20 +241,20 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
// Store the arguments object. This must happen after context
// initialization because the arguments object may be stored in the
// context.
- if (scope_->arguments() != NULL) {
+ if (scope()->arguments() != NULL) {
Comment cmnt(masm_, "[ allocate arguments object");
- ASSERT(scope_->arguments_shadow() != NULL);
- Variable* arguments = scope_->arguments()->var();
- Variable* shadow = scope_->arguments_shadow()->var();
+ ASSERT(scope()->arguments_shadow() != NULL);
+ Variable* arguments = scope()->arguments()->var();
+ Variable* shadow = scope()->arguments_shadow()->var();
ASSERT(arguments != NULL && arguments->slot() != NULL);
ASSERT(shadow != NULL && shadow->slot() != NULL);
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
__ ldr(r2, frame_->Function());
// The receiver is below the arguments, the return address, and the
// frame pointer on the stack.
- const int kReceiverDisplacement = 2 + scope_->num_parameters();
+ const int kReceiverDisplacement = 2 + scope()->num_parameters();
__ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
- __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+ __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
frame_->Adjust(3);
__ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
frame_->CallStub(&stub, 3);
@@ -273,10 +265,10 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
}
// Initialize ThisFunction reference if present.
- if (scope_->is_function_scope() && scope_->function() != NULL) {
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
__ mov(ip, Operand(Factory::the_hole_value()));
frame_->EmitPush(ip);
- StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
}
} else {
// When used as the secondary compiler for splitting, r1, cp,
@@ -295,12 +287,12 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
- if (scope_->HasIllegalRedeclaration()) {
+ if (scope()->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ illegal redeclarations");
- scope_->VisitIllegalRedeclaration(this);
+ scope()->VisitIllegalRedeclaration(this);
} else {
Comment cmnt(masm_, "[ declarations");
- ProcessDeclarations(scope_->declarations());
+ ProcessDeclarations(scope()->declarations());
// Bail out if a stack-overflow exception occurred when processing
// declarations.
if (HasStackOverflow()) return;
@@ -314,7 +306,7 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
// Compile the body of the function in a vanilla state. Don't
// bother compiling all the code if the scope has an illegal
// redeclaration.
- if (!scope_->HasIllegalRedeclaration()) {
+ if (!scope()->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
bool is_builtin = Bootstrapper::IsActive();
@@ -325,14 +317,14 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
// Ignore the return value.
}
#endif
- VisitStatementsAndSpill(body);
+ VisitStatementsAndSpill(info->function()->body());
}
}
// Generate the return sequence if necessary.
if (has_valid_frame() || function_return_.is_linked()) {
if (!function_return_.is_linked()) {
- CodeForReturnPosition(fun);
+ CodeForReturnPosition(info->function());
}
// exit
// r0: result
@@ -355,7 +347,7 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
// Calculate the exact length of the return sequence and make sure that
// the constant pool is not emitted inside of the return sequence.
- int32_t sp_delta = (scope_->num_parameters() + 1) * kPointerSize;
+ int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
int return_sequence_length = Assembler::kJSReturnSequenceLength;
if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
// Additional mov instruction generated.
@@ -395,7 +387,6 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
}
allocator_ = NULL;
- scope_ = NULL;
}
@@ -2302,8 +2293,7 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
Comment cmnt(masm_, "[ DebuggerStatament");
CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
- DebuggerStatementStub ces;
- frame_->CallStub(&ces, 0);
+ frame_->DebugBreak();
#endif
// Ignore the return value.
ASSERT(frame_->height() == original_height);
@@ -2341,7 +2331,7 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
// Build the function boilerplate and instantiate it.
Handle<JSFunction> boilerplate =
- Compiler::BuildBoilerplate(node, script_, this);
+ Compiler::BuildBoilerplate(node, script(), this);
// Check for stack-overflow exception.
if (HasStackOverflow()) {
ASSERT(frame_->height() == original_height);
@@ -2728,9 +2718,9 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
}
frame_->EmitPush(r0); // save the result
- // r0: created object literal
-
for (int i = 0; i < node->properties()->length(); i++) {
+ // At the start of each iteration, the top of stack contains
+ // the newly created object literal.
ObjectLiteral::Property* property = node->properties()->at(i);
Literal* key = property->key();
Expression* value = property->value();
@@ -2740,34 +2730,43 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
// else fall through
- case ObjectLiteral::Property::COMPUTED: // fall through
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->handle()->IsSymbol()) {
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ LoadAndSpill(value);
+ frame_->EmitPop(r0);
+ __ mov(r2, Operand(key->handle()));
+ __ ldr(r1, frame_->Top()); // Load the receiver.
+ frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
+ break;
+ }
+ // else fall through
case ObjectLiteral::Property::PROTOTYPE: {
+ __ ldr(r0, frame_->Top());
frame_->EmitPush(r0); // dup the result
LoadAndSpill(key);
LoadAndSpill(value);
frame_->CallRuntime(Runtime::kSetProperty, 3);
- // restore r0
- __ ldr(r0, frame_->Top());
break;
}
case ObjectLiteral::Property::SETTER: {
+ __ ldr(r0, frame_->Top());
frame_->EmitPush(r0);
LoadAndSpill(key);
__ mov(r0, Operand(Smi::FromInt(1)));
frame_->EmitPush(r0);
LoadAndSpill(value);
frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- __ ldr(r0, frame_->Top());
break;
}
case ObjectLiteral::Property::GETTER: {
+ __ ldr(r0, frame_->Top());
frame_->EmitPush(r0);
LoadAndSpill(key);
__ mov(r0, Operand(Smi::FromInt(0)));
frame_->EmitPush(r0);
LoadAndSpill(value);
frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- __ ldr(r0, frame_->Top());
break;
}
}
@@ -2785,17 +2784,19 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
// Load the function of this activation.
__ ldr(r2, frame_->Function());
- // Literals array.
+ // Load the literals array of the function.
__ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
- // Literal index.
__ mov(r1, Operand(Smi::FromInt(node->literal_index())));
- // Constant elements.
__ mov(r0, Operand(node->constant_elements()));
frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
+ int length = node->values()->length();
if (node->depth() > 1) {
frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else {
+ } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ } else {
+ FastCloneShallowArrayStub stub(length);
+ frame_->CallStub(&stub, 3);
}
frame_->EmitPush(r0); // save the result
// r0: created object literal
@@ -3022,11 +3023,6 @@ void CodeGenerator::VisitCall(Call* node) {
// ----------------------------------
// JavaScript example: 'foo(1, 2, 3)' // foo is global
// ----------------------------------
-
- // Push the name of the function and the receiver onto the stack.
- __ mov(r0, Operand(var->name()));
- frame_->EmitPush(r0);
-
// Pass the global object as the receiver and let the IC stub
// patch the stack to use the global proxy as 'this' in the
// invoked function.
@@ -3038,15 +3034,14 @@ void CodeGenerator::VisitCall(Call* node) {
LoadAndSpill(args->at(i));
}
- // Setup the receiver register and call the IC initialization code.
+ // Setup the name register and call the IC initialization code.
+ __ mov(r2, Operand(var->name()));
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
CodeForSourcePosition(node->position());
frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
arg_count + 1);
__ ldr(cp, frame_->Context());
- // Remove the function from the stack.
- frame_->Drop();
frame_->EmitPush(r0);
} else if (var != NULL && var->slot() != NULL &&
@@ -3079,28 +3074,21 @@ void CodeGenerator::VisitCall(Call* node) {
// JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
// ------------------------------------------------------------------
- // Push the name of the function and the receiver onto the stack.
- __ mov(r0, Operand(literal->handle()));
- frame_->EmitPush(r0);
- LoadAndSpill(property->obj());
-
+ LoadAndSpill(property->obj()); // Receiver.
// Load the arguments.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
LoadAndSpill(args->at(i));
}
- // Set the receiver register and call the IC initialization code.
+ // Set the name register and call the IC initialization code.
+ __ mov(r2, Operand(literal->handle()));
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
CodeForSourcePosition(node->position());
frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
__ ldr(cp, frame_->Context());
-
- // Remove the function from the stack.
- frame_->Drop();
-
- frame_->EmitPush(r0); // push after get rid of function from the stack
+ frame_->EmitPush(r0);
} else {
// -------------------------------------------
@@ -3519,7 +3507,7 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
// Seed the result with the formal parameters count, which will be used
// in case no arguments adaptor frame is found below the current frame.
- __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+ __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
// Call the shared stub to get to the arguments.length.
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
@@ -3536,7 +3524,7 @@ void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
// Load the key into r1 and the formal parameters count into r0.
LoadAndSpill(args->at(0));
frame_->EmitPop(r1);
- __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+ __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
// Call the shared stub to get to arguments[key].
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
@@ -3560,7 +3548,8 @@ void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
Load(args->at(0));
Load(args->at(1));
- frame_->CallRuntime(Runtime::kStringAdd, 2);
+ StringAddStub stub(NO_STRING_ADD_FLAGS);
+ frame_->CallStub(&stub, 2);
frame_->EmitPush(r0);
}
@@ -3572,7 +3561,8 @@ void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
Load(args->at(1));
Load(args->at(2));
- frame_->CallRuntime(Runtime::kSubString, 3);
+ SubStringStub stub;
+ frame_->CallStub(&stub, 3);
frame_->EmitPush(r0);
}
@@ -3602,6 +3592,17 @@ void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+
+ // Load the argument on the stack and jump to the runtime.
+ Load(args->at(0));
+
+ frame_->CallRuntime(Runtime::kNumberToString, 1);
+ frame_->EmitPush(r0);
+}
+
+
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
@@ -3633,8 +3634,6 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (function == NULL) {
// Prepare stack for calling JS runtime function.
- __ mov(r0, Operand(node->name()));
- frame_->EmitPush(r0);
// Push the builtins object found in the current global object.
__ ldr(r1, GlobalObject());
__ ldr(r0, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
@@ -3649,11 +3648,11 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (function == NULL) {
// Call the JS runtime function.
+ __ mov(r2, Operand(node->name()));
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
__ ldr(cp, frame_->Context());
- frame_->Drop();
frame_->EmitPush(r0);
} else {
// Call the C runtime function.
@@ -4396,11 +4395,11 @@ void Reference::SetValue(InitState init_state) {
Handle<String> name(GetName());
frame->EmitPop(r0);
- // Setup the name register.
+ frame->EmitPop(r1);
__ mov(r2, Operand(name));
frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
frame->EmitPush(r0);
- cgen_->UnloadReference(this);
+ set_unloaded();
break;
}
@@ -4412,7 +4411,6 @@ void Reference::SetValue(InitState init_state) {
// Call IC code.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- // TODO(1222589): Make the IC grab the values from the stack.
frame->EmitPop(r0); // value
frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
frame->EmitPush(r0);
@@ -4487,7 +4485,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
TAG_OBJECT);
// Load the function from the stack.
- __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+ __ ldr(r3, MemOperand(sp, 0));
// Setup the object header.
__ LoadRoot(r2, Heap::kContextMapRootIndex);
@@ -4523,6 +4521,69 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [sp]: constant elements.
+ // [sp + kPointerSize]: literal index.
+ // [sp + (2 * kPointerSize)]: literals array.
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+ int size = JSArray::kSize + elements_size;
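+  // For example, with length_ == 3 and 4-byte pointers this is
+  // JSArray::kSize + FixedArray::kHeaderSize + 3 * kPointerSize bytes;
+  // AllocateInNewSpace below takes it as a word count (size / kPointerSize).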
+
+ // Load boilerplate object into r3 and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
+ __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
+ __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
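+  // The literal index in r0 is a smi, i.e. the index shifted left by
+  // kSmiTagSize, so shifting it left by (kPointerSizeLog2 - kSmiTagSize)
+  // yields the byte offset index * kPointerSize into the literals array.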
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r3, ip);
+ __ b(eq, &slow_case);
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size / kPointerSize,
+ r0,
+ r1,
+ r2,
+ &slow_case,
+ TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+ __ ldr(r1, FieldMemOperand(r3, i));
+ __ str(r1, FieldMemOperand(r0, i));
+ }
+ }
+
+ if (length_ > 0) {
+    // Get hold of the elements array of the boilerplate and set up the
+ // elements pointer in the resulting object.
+ __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ add(r2, r0, Operand(JSArray::kSize));
+ __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
+
+ // Copy the elements array.
+ for (int i = 0; i < elements_size; i += kPointerSize) {
+ __ ldr(r1, FieldMemOperand(r3, i));
+ __ str(r1, FieldMemOperand(r2, i));
+ }
+ }
+
+ // Return and remove the on-stack parameters.
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&slow_case);
+ ExternalReference runtime(Runtime::kCreateArrayLiteralShallow);
+ __ TailCallRuntime(runtime, 3, 1);
+}
+
+
// Count leading zeros in a 32-bit word. On ARMv5 and later it uses the clz
// instruction. On pre-ARMv5 hardware this routine gives the wrong answer for 0
// (31 instead of 32).
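// For reference, a rough portable C equivalent (illustrative sketch only;
// this hypothetical helper returns the correct 32 for a zero input):
//   int clz32(uint32_t x) {
//     if (x == 0) return 32;
//     int n = 0;
//     while ((x & 0x80000000u) == 0) { x <<= 1; n++; }
//     return n;
//   }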
@@ -5340,7 +5401,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// r1 : first argument
// r0 : second argument
// sp[0] : second argument
- // sp[1] : first argument
+ // sp[4] : first argument
Label not_strings, not_string1, string1;
__ tst(r1, Operand(kSmiTagMask));
@@ -5355,7 +5416,8 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
__ b(ge, &string1);
// First and second argument are strings.
- __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&stub);
// Only first argument is a string.
__ bind(&string1);
@@ -5369,7 +5431,6 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
__ b(ge, &not_strings);
// Only second argument is a string.
- __ b(&not_strings);
__ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
__ bind(&not_strings);
@@ -5851,6 +5912,7 @@ const char* GenericBinaryOpStub::GetName() {
}
+
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// r1 : x
// r0 : y
@@ -6043,9 +6105,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
case Token::SAR:
// Remove tags from right operand.
- __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
+ __ GetLeastBitsFromSmi(r2, r0, 5);
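+      // GetLeastBitsFromSmi replaces the untag-and-mask sequence above:
+      // it moves the five least significant bits of the untagged smi in r0
+      // into r2, the valid ARM shift-amount range (0-31).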
__ mov(r0, Operand(r1, ASR, r2));
// Smi tag result.
__ bic(r0, r0, Operand(kSmiTagMask));
@@ -6054,9 +6114,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// Remove tags from operands. We can't do this on a 31 bit number
// because then the 0s get shifted into bit 30 instead of bit 31.
__ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
- __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
+ __ GetLeastBitsFromSmi(r2, r0, 5);
__ mov(r3, Operand(r3, LSR, r2));
// Unsigned shift is not allowed to produce a negative number, so
// check the sign bit and the sign bit after Smi tagging.
@@ -6068,9 +6126,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::SHL:
// Remove tags from operands.
__ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
- __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
+ __ GetLeastBitsFromSmi(r2, r0, 5);
__ mov(r3, Operand(r3, LSL, r2));
// Check that the signed result fits in a Smi.
__ add(r2, r3, Operand(0x40000000), SetCC);
@@ -6478,8 +6534,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r1: function
// r2: receiver
// r3: argc
- __ add(r4, sp, Operand((kNumCalleeSaved + 1)*kPointerSize));
- __ ldr(r4, MemOperand(r4)); // argv
+ __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv
  // Push a frame with special values set up to mark it as an entry frame.
// r0: code entry
@@ -6597,7 +6652,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ b(gt, &slow);
// Get the prototype of the function (r4 is result, r2 is scratch).
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+ __ ldr(r1, MemOperand(sp, 0));
__ TryGetFunctionPrototype(r1, r4, r2, &slow);
// Check that the function prototype is a JS object.
@@ -6712,20 +6767,102 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // sp[0] : number of parameters
+ // sp[4] : receiver displacement
+ // sp[8] : function
+
// Check if the calling frame is an arguments adaptor frame.
- Label runtime;
+ Label adaptor_frame, try_allocate, runtime;
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
__ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &runtime);
+ __ b(eq, &adaptor_frame);
+
+ // Get the length from the frame.
+ __ ldr(r1, MemOperand(sp, 0));
+ __ b(&try_allocate);
// Patch the arguments.length and the parameters pointer.
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ str(r0, MemOperand(sp, 0 * kPointerSize));
- __ add(r3, r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ bind(&adaptor_frame);
+ __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ str(r1, MemOperand(sp, 0));
+ __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
__ str(r3, MemOperand(sp, 1 * kPointerSize));
+ // Try the new space allocation. Start out with computing the size
+ // of the arguments object and the elements array (in words, not
+ // bytes because AllocateInNewSpace expects words).
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ cmp(r1, Operand(0));
+ __ b(eq, &add_arguments_object);
+ __ mov(r1, Operand(r1, LSR, kSmiTagSize));
+ __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ bind(&add_arguments_object);
+ __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize));
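+  // r1 now holds the combined size in words: the elements array (header
+  // plus one slot per argument, skipped entirely when there are none) and
+  // the fixed-size arguments object itself.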
+
+ // Do the allocation of both objects in one go.
+ __ AllocateInNewSpace(r1, r0, r2, r3, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current (global) context.
+ int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
+ __ ldr(r4, MemOperand(r4, offset));
+
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ ldr(r3, FieldMemOperand(r4, i));
+ __ str(r3, FieldMemOperand(r0, i));
+ }
+
+  // Set up the callee in-object property.
+ ASSERT(Heap::arguments_callee_index == 0);
+ __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
+ __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ ASSERT(Heap::arguments_length_index == 1);
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+ __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ cmp(r1, Operand(0));
+ __ b(eq, &done);
+
+ // Get the parameters pointer from the stack and untag the length.
+ __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
+ __ mov(r1, Operand(r1, LSR, kSmiTagSize));
+
+  // Set up the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
+ __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+ __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
+ __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
+ __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
+
+ // Copy the fixed array slots.
+ Label loop;
+  // Set up r4 to point to the first array slot.
+ __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ // Pre-decrement r2 with kPointerSize on each iteration.
+ // Pre-decrement in order to skip receiver.
+ __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
+ // Post-increment r4 with kPointerSize on each iteration.
+ __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
+ __ sub(r1, r1, Operand(1));
+ __ cmp(r1, Operand(0));
+ __ b(ne, &loop);
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
__ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
@@ -6779,6 +6916,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Slow-case: Non-function called.
__ bind(&slow);
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ str(r1, MemOperand(sp, argc_ * kPointerSize));
+  __ mov(r0, Operand(argc_));  // Set up the number of arguments.
__ mov(r2, Operand(0));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
@@ -6837,6 +6977,340 @@ int CompareStub::MinorKey() {
}
+void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ Label loop;
+ Label done;
+ // This loop just copies one character at a time, as it is only used for very
+ // short strings.
+ if (!ascii) {
+ __ add(count, count, Operand(count), SetCC);
+ } else {
+ __ cmp(count, Operand(0));
+ }
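+  // For two-byte strings the add doubles the character count into a byte
+  // count; both paths set the Z flag when there is nothing to copy.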
+ __ b(eq, &done);
+
+ __ bind(&loop);
+ __ ldrb(scratch, MemOperand(src, 1, PostIndex));
+ // Perform sub between load and dependent store to get the load time to
+ // complete.
+ __ sub(count, count, Operand(1), SetCC);
+ __ strb(scratch, MemOperand(dest, 1, PostIndex));
+  // Branch back unless this was the last iteration.
+ __ b(gt, &loop);
+
+ __ bind(&done);
+}
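+// Roughly, the loop above is the following C sketch (two-byte callers pass
+// the character count, which the code doubles into a byte count first):
+//   while (count-- > 0) *dest++ = *src++;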
+
+
+enum CopyCharactersFlags {
+ COPY_ASCII = 1,
+ DEST_ALWAYS_ALIGNED = 2
+};
+
+
+void StringStubBase::GenerateCopyCharactersLong(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ int flags) {
+ bool ascii = (flags & COPY_ASCII) != 0;
+ bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
+
+ if (dest_always_aligned && FLAG_debug_code) {
+ // Check that destination is actually word aligned if the flag says
+ // that it is.
+ __ tst(dest, Operand(kPointerAlignmentMask));
+ __ Check(eq, "Destination of copy not aligned.");
+ }
+
+ const int kReadAlignment = 4;
+ const int kReadAlignmentMask = kReadAlignment - 1;
+ // Ensure that reading an entire aligned word containing the last character
+ // of a string will not read outside the allocated area (because we pad up
+ // to kObjectAlignment).
+ ASSERT(kObjectAlignment >= kReadAlignment);
+ // Assumes word reads and writes are little endian.
+ // Nothing to do for zero characters.
+ Label done;
+ if (!ascii) {
+ __ add(count, count, Operand(count), SetCC);
+ } else {
+ __ cmp(count, Operand(0));
+ }
+ __ b(eq, &done);
+
+  // Assume that unaligned reads and writes are not supported.
+ Label byte_loop;
+ // Must copy at least eight bytes, otherwise just do it one byte at a time.
+ __ cmp(count, Operand(8));
+ __ add(count, dest, Operand(count));
+ Register limit = count; // Read until src equals this.
+ __ b(lt, &byte_loop);
+
+ if (!dest_always_aligned) {
+ // Align dest by byte copying. Copies between zero and three bytes.
+ __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
+ Label dest_aligned;
+ __ b(eq, &dest_aligned);
+ __ cmp(scratch4, Operand(2));
+ __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
+ __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
+ __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
+ __ strb(scratch1, MemOperand(dest, 1, PostIndex));
+ __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
+ __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
+ __ bind(&dest_aligned);
+ }
+
+ Label simple_loop;
+
+ __ sub(scratch4, dest, Operand(src));
+ __ and_(scratch4, scratch4, Operand(0x03), SetCC);
+ __ b(eq, &simple_loop);
+  // The shift register holds the number of bits in a source word that
+  // must be combined with bits in the next source word in order
+  // to create a destination word.
+
+ // Complex loop for src/dst that are not aligned the same way.
+ {
+ Label loop;
+ __ mov(scratch4, Operand(scratch4, LSL, 3));
+ Register left_shift = scratch4;
+ __ and_(src, src, Operand(~3)); // Round down to load previous word.
+ __ ldr(scratch1, MemOperand(src, 4, PostIndex));
+    // Store the "shift" most significant bits of scratch1 in the least
+    // significant bits (i.e., shift down by (32 - shift)).
+ __ rsb(scratch2, left_shift, Operand(32));
+ Register right_shift = scratch2;
+ __ mov(scratch1, Operand(scratch1, LSR, right_shift));
+
+ __ bind(&loop);
+ __ ldr(scratch3, MemOperand(src, 4, PostIndex));
+ __ sub(scratch5, limit, Operand(dest));
+ __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
+ __ str(scratch1, MemOperand(dest, 4, PostIndex));
+ __ mov(scratch1, Operand(scratch3, LSR, right_shift));
+    // Loop if four or more bytes are left to copy.
+    // Compare to eight, because we did the subtraction before increasing dest.
+ __ sub(scratch5, scratch5, Operand(8), SetCC);
+ __ b(ge, &loop);
+ }
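+  // Rough C sketch of the loop above (w0 and w1 are illustrative names):
+  //   w0 = *src++ >> right_shift;
+  //   while (limit - dest >= 4) {
+  //     w1 = *src++;
+  //     *dest++ = w0 | (w1 << left_shift);
+  //     w0 = w1 >> right_shift;
+  //   }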
+  // There are now between zero and three bytes left to copy (scratch5 holds
+  // that number negated), and between one and three bytes already read into
+  // scratch1 (scratch4 holds eight times that number, i.e. the bit count). We
+  // may have read past the end of the string, but because objects are
+  // aligned, we have not read past the end of the object.
+  // Find the minimum of the remaining characters to move and the preloaded
+  // characters, and write that many as bytes.
+ __ add(scratch5, scratch5, Operand(4), SetCC);
+ __ b(eq, &done);
+ __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
+  // Move the minimum of bytes read and bytes left to copy into scratch5.
+  __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
+  // Between one and three characters (the value in scratch5) have already
+  // been read into scratch1 and are ready to be written.
+ __ cmp(scratch5, Operand(2));
+ __ strb(scratch1, MemOperand(dest, 1, PostIndex));
+ __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
+ __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
+ __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
+ __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
+ // Copy any remaining bytes.
+ __ b(&byte_loop);
+
+ // Simple loop.
+ // Copy words from src to dst, until less than four bytes left.
+ // Both src and dest are word aligned.
+ __ bind(&simple_loop);
+ {
+ Label loop;
+ __ bind(&loop);
+ __ ldr(scratch1, MemOperand(src, 4, PostIndex));
+ __ sub(scratch3, limit, Operand(dest));
+ __ str(scratch1, MemOperand(dest, 4, PostIndex));
+    // Compare to 8, not 4, because we do the subtraction before increasing
+    // dest.
+ __ cmp(scratch3, Operand(8));
+ __ b(ge, &loop);
+ }
+
+ // Copy bytes from src to dst until dst hits limit.
+ __ bind(&byte_loop);
+ __ cmp(dest, Operand(limit));
+ __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
+ __ b(ge, &done);
+ __ strb(scratch1, MemOperand(dest, 1, PostIndex));
+ __ b(&byte_loop);
+
+ __ bind(&done);
+}
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // lr: return address
+ // sp[0]: to
+ // sp[4]: from
+ // sp[8]: string
+
+ // This stub is called from the native-call %_SubString(...), so
+  // nothing can be assumed about the arguments. The stub checks that:
+ // "string" is a sequential string,
+ // both "from" and "to" are smis, and
+ // 0 <= from <= to <= string.length.
+ // If any of these assumptions fail, we call the runtime system.
+
+ static const int kToOffset = 0 * kPointerSize;
+ static const int kFromOffset = 1 * kPointerSize;
+ static const int kStringOffset = 2 * kPointerSize;
+
+ // Check bounds and smi-ness.
+ __ ldr(r7, MemOperand(sp, kToOffset));
+ __ ldr(r6, MemOperand(sp, kFromOffset));
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ // I.e., arithmetic shift right by one un-smi-tags.
+ __ mov(r2, Operand(r7, ASR, 1), SetCC);
+ __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
+  // If either r7 or r6 had the smi tag bit set, then carry is set now.
+ __ b(cs, &runtime); // Either "from" or "to" is not a smi.
+ __ b(mi, &runtime); // From is negative.
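+  // Worked example: the smi 3 is encoded as binary 110, so ASR #1 recovers
+  // 3 and shifts a 0 into the carry flag; a heap object pointer has bit 0
+  // set, so the same shift would set the carry flag.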
+
+ __ sub(r2, r2, Operand(r3), SetCC);
+ __ b(mi, &runtime); // Fail if from > to.
+ // Handle sub-strings of length 2 and less in the runtime system.
+ __ cmp(r2, Operand(2));
+ __ b(le, &runtime);
+
+ // r2: length
+ // r6: from (smi)
+ // r7: to (smi)
+
+ // Make sure first argument is a sequential (or flat) string.
+ __ ldr(r5, MemOperand(sp, kStringOffset));
+ ASSERT_EQ(0, kSmiTag);
+ __ tst(r5, Operand(kSmiTagMask));
+ __ b(eq, &runtime);
+ Condition is_string = masm->IsObjectStringType(r5, r1);
+ __ b(NegateCondition(is_string), &runtime);
+
+ // r1: instance type
+ // r2: length
+ // r5: string
+ // r6: from (smi)
+ // r7: to (smi)
+ Label seq_string;
+ __ and_(r4, r1, Operand(kStringRepresentationMask));
+ ASSERT(kSeqStringTag < kConsStringTag);
+ ASSERT(kExternalStringTag > kConsStringTag);
+ __ cmp(r4, Operand(kConsStringTag));
+ __ b(gt, &runtime); // External strings go to runtime.
+ __ b(lt, &seq_string); // Sequential strings are handled directly.
+
+ // Cons string. Try to recurse (once) on the first substring.
+ // (This adds a little more generality than necessary to handle flattened
+ // cons strings, but not much).
+ __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
+ __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ tst(r1, Operand(kStringRepresentationMask));
+ ASSERT_EQ(0, kSeqStringTag);
+ __ b(ne, &runtime); // Cons and External strings go to runtime.
+
+  // Definitely a sequential string.
+ __ bind(&seq_string);
+
+ // r1: instance type.
+ // r2: length
+ // r5: string
+ // r6: from (smi)
+ // r7: to (smi)
+ __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
+ __ cmp(r4, Operand(r7, ASR, 1));
+ __ b(lt, &runtime); // Fail if to > length.
+
+ // r1: instance type.
+ // r2: result string length.
+ // r5: string.
+ // r6: from offset (smi)
+ // Check for flat ascii string.
+ Label non_ascii_flat;
+ __ tst(r1, Operand(kStringEncodingMask));
+ ASSERT_EQ(0, kTwoByteStringTag);
+ __ b(eq, &non_ascii_flat);
+
+ // Allocate the result.
+ __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
+
+ // r0: result string.
+ // r2: result string length.
+ // r5: string.
+ // r6: from offset (smi)
+ // Locate first character of result.
+ __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Locate 'from' character of string.
+ __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r5, r5, Operand(r6, ASR, 1));
+
+ // r0: result string.
+ // r1: first character of result string.
+ // r2: result string length.
+ // r5: first character of sub string to copy.
+ ASSERT_EQ(0, SeqAsciiString::kHeaderSize & kObjectAlignmentMask);
+ GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+ COPY_ASCII | DEST_ALWAYS_ALIGNED);
+ __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii_flat);
+ // r2: result string length.
+ // r5: string.
+ // r6: from offset (smi)
+ // Check for flat two byte string.
+
+ // Allocate the result.
+ __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
+
+ // r0: result string.
+ // r2: result string length.
+ // r5: string.
+ // Locate first character of result.
+ __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Locate 'from' character of string.
+ __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // As "from" is a smi it is already twice the character index, which
+  // matches the size of a two-byte character.
+ __ add(r5, r5, Operand(r6));
+
+ // r0: result string.
+ // r1: first character of result.
+ // r2: result length.
+ // r5: first character of string to copy.
+ ASSERT_EQ(0, SeqTwoByteString::kHeaderSize & kObjectAlignmentMask);
+ GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+ DEST_ALWAYS_ALIGNED);
+ __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
+}
void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
@@ -6898,12 +7372,10 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
Label runtime;
// Stack frame on entry.
- // sp[0]: return address
- // sp[4]: right string
- // sp[8]: left string
-
- __ ldr(r0, MemOperand(sp, 2 * kPointerSize)); // left
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); // right
+ // sp[0]: right string
+ // sp[4]: left string
+ __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // left
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // right
Label not_same;
__ cmp(r0, r1);
@@ -6932,6 +7404,220 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
+void StringAddStub::Generate(MacroAssembler* masm) {
+ Label string_add_runtime;
+ // Stack on entry:
+ // sp[0]: second argument.
+ // sp[4]: first argument.
+
+ // Load the two arguments.
+ __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
+
+ // Make sure that both arguments are strings if not known in advance.
+ if (string_check_) {
+ ASSERT_EQ(0, kSmiTag);
+ __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
+ // Load instance types.
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+ ASSERT_EQ(0, kStringTag);
+ // If either is not a string, go to runtime.
+ __ tst(r4, Operand(kIsNotStringMask));
+ __ tst(r5, Operand(kIsNotStringMask), eq);
+ __ b(ne, &string_add_runtime);
+ }
+
+ // Both arguments are strings.
+ // r0: first string
+ // r1: second string
+ // r4: first string instance type (if string_check_)
+ // r5: second string instance type (if string_check_)
+ {
+ Label strings_not_empty;
+    // Check if either of the strings is empty. In that case return the other.
+ __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
+ __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
+ __ cmp(r2, Operand(0)); // Test if first string is empty.
+ __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
+ __ cmp(r3, Operand(0), ne); // Else test if second string is empty.
+ __ b(ne, &strings_not_empty); // If either string was empty, return r0.
+
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&strings_not_empty);
+ }
+
+ // Both strings are non-empty.
+ // r0: first string
+ // r1: second string
+ // r2: length of first string
+ // r3: length of second string
+ // r4: first string instance type (if string_check_)
+ // r5: second string instance type (if string_check_)
+ // Look at the length of the result of adding the two strings.
+ Label string_add_flat_result;
+ // Adding two lengths can't overflow.
+ ASSERT(String::kMaxLength * 2 > String::kMaxLength);
+ __ add(r6, r2, Operand(r3));
+  // Use the runtime system when adding two one-character strings, as it
+ // contains optimizations for this specific case using the symbol table.
+ __ cmp(r6, Operand(2));
+ __ b(eq, &string_add_runtime);
+  // Check if the resulting string will be flat.
+ __ cmp(r6, Operand(String::kMinNonFlatLength));
+ __ b(lt, &string_add_flat_result);
+ // Handle exceptionally long strings in the runtime system.
+ ASSERT((String::kMaxLength & 0x80000000) == 0);
+ ASSERT(IsPowerOf2(String::kMaxLength + 1));
+  // kMaxLength + 1 is representable as a shifted literal; kMaxLength is not.
+ __ cmp(r6, Operand(String::kMaxLength + 1));
+ __ b(hs, &string_add_runtime);
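+  // An ARM data-processing immediate is an 8-bit value rotated right by an
+  // even amount, so the single set bit of kMaxLength + 1 is encodable while
+  // the long run of one bits in kMaxLength is not.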
+
+  // If the result is not supposed to be flat, allocate a cons string object.
+ // If both strings are ascii the result is an ascii cons string.
+ if (!string_check_) {
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+ }
+ Label non_ascii, allocated;
+ ASSERT_EQ(0, kTwoByteStringTag);
+ __ tst(r4, Operand(kStringEncodingMask));
+ __ tst(r5, Operand(kStringEncodingMask), ne);
+ __ b(eq, &non_ascii);
+
+ // Allocate an ASCII cons string.
+ __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
+ __ bind(&allocated);
+ // Fill the fields of the cons string.
+ __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
+ __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
+ __ mov(r0, Operand(r7));
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii);
+ // Allocate a two byte cons string.
+ __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
+ __ jmp(&allocated);
+
+ // Handle creating a flat result. First check that both strings are
+ // sequential and that they have the same encoding.
+ // r0: first string
+ // r1: second string
+ // r2: length of first string
+ // r3: length of second string
+ // r4: first string instance type (if string_check_)
+ // r5: second string instance type (if string_check_)
+ // r6: sum of lengths.
+ __ bind(&string_add_flat_result);
+ if (!string_check_) {
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+ }
+ // Check that both strings are sequential.
+ ASSERT_EQ(0, kSeqStringTag);
+ __ tst(r4, Operand(kStringRepresentationMask));
+ __ tst(r5, Operand(kStringRepresentationMask), eq);
+ __ b(ne, &string_add_runtime);
+ // Now check if both strings have the same encoding (ASCII/Two-byte).
+ // r0: first string.
+ // r1: second string.
+ // r2: length of first string.
+ // r3: length of second string.
+  // r6: sum of lengths.
+ Label non_ascii_string_add_flat_result;
+ ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
+ __ eor(r7, r4, Operand(r5));
+ __ tst(r7, Operand(kStringEncodingMask));
+ __ b(ne, &string_add_runtime);
+ // And see if it's ASCII or two-byte.
+ __ tst(r4, Operand(kStringEncodingMask));
+ __ b(eq, &non_ascii_string_add_flat_result);
+
+ // Both strings are sequential ASCII strings. We also know that they are
+ // short (since the sum of the lengths is less than kMinNonFlatLength).
+ __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
+ // Locate first character of result.
+ __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument.
+ __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // r0: first character of first string.
+ // r1: second string.
+ // r2: length of first string.
+ // r3: length of second string.
+ // r6: first character of result.
+ // r7: result string.
+ GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
+
+ // Load second argument and locate first character.
+ __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // r1: first character of second string.
+ // r3: length of second string.
+ // r6: next character of result.
+ // r7: result string.
+ GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
+ __ mov(r0, Operand(r7));
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii_string_add_flat_result);
+ // Both strings are sequential two byte strings.
+ // r0: first string.
+ // r1: second string.
+ // r2: length of first string.
+ // r3: length of second string.
+ // r6: sum of length of strings.
+ __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
+ // r0: first string.
+ // r1: second string.
+ // r2: length of first string.
+ // r3: length of second string.
+ // r7: result string.
+
+ // Locate first character of result.
+ __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument.
+ __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ // r0: first character of first string.
+ // r1: second string.
+ // r2: length of first string.
+ // r3: length of second string.
+ // r6: first character of result.
+ // r7: result string.
+ GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
+
+ // Locate first character of second argument.
+ __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ // r1: first character of second string.
+ // r3: length of second string.
+ // r6: next character of result (after copy of first string).
+ // r7: result string.
+ GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
+
+ __ mov(r0, Operand(r7));
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ // Just jump to runtime to add the two strings.
+ __ bind(&string_add_runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 0384485f1..10e28f411 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -161,19 +161,15 @@ class CodeGenerator: public AstVisitor {
// Takes a function literal, generates code for it. This function should only
// be called by compiler.cc.
- static Handle<Code> MakeCode(FunctionLiteral* fun,
- Handle<Script> script,
- bool is_eval,
- CompilationInfo* info);
+ static Handle<Code> MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(FunctionLiteral* fun);
+ static void MakeCodePrologue(CompilationInfo* info);
// Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
- MacroAssembler* masm,
+ static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
Code::Flags flags,
- Handle<Script> script);
+ CompilationInfo* info);
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
@@ -189,7 +185,7 @@ class CodeGenerator: public AstVisitor {
// Accessors
MacroAssembler* masm() { return masm_; }
VirtualFrame* frame() const { return frame_; }
- Handle<Script> script() { return script_; }
+ inline Handle<Script> script();
bool has_valid_frame() const { return frame_ != NULL; }
@@ -212,16 +208,15 @@ class CodeGenerator: public AstVisitor {
private:
// Construction/Destruction
- CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
+ explicit CodeGenerator(MacroAssembler* masm);
// Accessors
- Scope* scope() const { return scope_; }
+ inline bool is_eval();
+ Scope* scope();
// Generating deferred code.
void ProcessDeferred();
- bool is_eval() { return is_eval_; }
-
// State
bool has_cc() const { return cc_reg_ != al; }
JumpTarget* true_target() const { return state_->true_target(); }
@@ -249,7 +244,7 @@ class CodeGenerator: public AstVisitor {
inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
- void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
+ void Generate(CompilationInfo* info, Mode mode);
// The following are used by class Reference.
void LoadReference(Reference* ref);
@@ -403,6 +398,9 @@ class CodeGenerator: public AstVisitor {
// Support for direct calls from JavaScript to native RegExp code.
void GenerateRegExpExec(ZoneList<Expression*>* args);
+ // Fast support for number to string.
+ void GenerateNumberToString(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -425,16 +423,14 @@ class CodeGenerator: public AstVisitor {
bool HasValidEntryRegisters();
#endif
- bool is_eval_; // Tells whether code is generated for eval.
-
- Handle<Script> script_;
List<DeferredCode*> deferred_;
// Assembler
MacroAssembler* masm_; // to generate code
+ CompilationInfo* info_;
+
// Code generation state
- Scope* scope_;
VirtualFrame* frame_;
RegisterAllocator* allocator_;
Condition cc_reg_;
@@ -538,6 +534,74 @@ class GenericBinaryOpStub : public CodeStub {
};
+class StringStubBase: public CodeStub {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersLong adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
+
+ // Generate code for copying a large number of characters. This function
+ // is allowed to spend extra time setting up conditions to make copying
+ // faster. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ void GenerateCopyCharactersLong(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ int flags);
+};
+
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+ NO_STRING_ADD_FLAGS = 0,
+ NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
+};
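+// For example, HandleBinaryOpSlowCases in codegen-arm.cc tail-calls a
+// StringAddStub built with NO_STRING_CHECK_IN_STUB because it has already
+// verified that both operands are strings.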
+
+
+class StringAddStub: public StringStubBase {
+ public:
+ explicit StringAddStub(StringAddFlags flags) {
+ string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+ }
+
+ private:
+ Major MajorKey() { return StringAdd; }
+ int MinorKey() { return string_check_ ? 0 : 1; }
+
+ void Generate(MacroAssembler* masm);
+
+ // Should the stub check whether arguments are strings?
+ bool string_check_;
+};
+
+
+class SubStringStub: public StringStubBase {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
class StringCompareStub: public CodeStub {
public:
StringCompareStub() { }
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index 6eb5239b8..e6b61b4d2 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -128,7 +128,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
- // Registers r0 and r2 contain objects that needs to be pushed on the
+ // Registers r0 and r2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit());
}
@@ -137,14 +137,14 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-arm.cc).
// ----------- S t a t e -------------
- // -- r0 : receiver
+ // -- r0 : value
+ // -- r1 : receiver
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
// -----------------------------------
- // Registers r0 and r2 contain objects that needs to be pushed on the
+ // Registers r0, r1, and r2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit());
+ Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit());
}
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 5b314557d..127c16086 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -429,12 +429,22 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
return 3;
}
case 'o': {
- if (format[3] == '1') {
+ if ((format[3] == '1') && (format[4] == '2')) {
// 'off12: 12-bit offset for load and store instructions
ASSERT(STRING_STARTS_WITH(format, "off12"));
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", instr->Offset12Field());
return 5;
+ } else if ((format[3] == '1') && (format[4] == '6')) {
+ ASSERT(STRING_STARTS_WITH(format, "off16to20"));
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                             "%d", instr->Bits(20, 16) + 1);
+ return 9;
+ } else if (format[3] == '7') {
+ ASSERT(STRING_STARTS_WITH(format, "off7to11"));
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d", instr->ShiftAmountField());
+ return 8;
}
// 'off8: 8-bit offset for extra load and store instructions
ASSERT(STRING_STARTS_WITH(format, "off8"));
@@ -795,7 +805,18 @@ void Decoder::DecodeType3(Instr* instr) {
break;
}
case 3: {
- Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+ if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
+ uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
+ uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
+ uint32_t msbit = widthminus1 + lsbit;
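+        // ubfx extracts (widthminus1 + 1) bits starting at lsbit; the
+        // encoding is only valid if the most significant extracted bit,
+        // msbit, stays within the 32-bit word.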
+ if (msbit <= 31) {
+ Format(instr, "ubfx'cond 'rd, 'rm, #'off7to11, #'off16to20");
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+ }
break;
}
default: {
diff --git a/deps/v8/src/arm/fast-codegen-arm.cc b/deps/v8/src/arm/fast-codegen-arm.cc
index 1aeea7ab6..a07b0d2dc 100644
--- a/deps/v8/src/arm/fast-codegen-arm.cc
+++ b/deps/v8/src/arm/fast-codegen-arm.cc
@@ -35,78 +35,142 @@ namespace internal {
#define __ ACCESS_MASM(masm())
-void FastCodeGenerator::EmitLoadReceiver(Register reg) {
+Register FastCodeGenerator::accumulator0() { return r0; }
+Register FastCodeGenerator::accumulator1() { return r1; }
+Register FastCodeGenerator::scratch0() { return r3; }
+Register FastCodeGenerator::scratch1() { return r4; }
+Register FastCodeGenerator::receiver_reg() { return r2; }
+Register FastCodeGenerator::context_reg() { return cp; }
+
+
+void FastCodeGenerator::EmitLoadReceiver() {
// Offset 2 is due to return address and saved frame pointer.
- int index = 2 + function()->scope()->num_parameters();
- __ ldr(reg, MemOperand(sp, index * kPointerSize));
+ int index = 2 + scope()->num_parameters();
+ __ ldr(receiver_reg(), MemOperand(sp, index * kPointerSize));
}
-void FastCodeGenerator::EmitReceiverMapCheck() {
- Comment cmnt(masm(), ";; MapCheck(this)");
- if (FLAG_print_ir) {
- PrintF("MapCheck(this)\n");
- }
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
+ ASSERT(!destination().is(no_reg));
+ ASSERT(cell->IsJSGlobalPropertyCell());
- EmitLoadReceiver(r1);
- __ BranchOnSmi(r1, bailout());
+ __ mov(destination(), Operand(cell));
+ __ ldr(destination(),
+ FieldMemOperand(destination(), JSGlobalPropertyCell::kValueOffset));
+ if (FLAG_debug_code) {
+ __ mov(ip, Operand(Factory::the_hole_value()));
+ __ cmp(destination(), ip);
+ __ Check(ne, "DontDelete cells can't contain the hole");
+ }
- ASSERT(has_receiver() && receiver()->IsHeapObject());
- Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
- Handle<Map> map(object->map());
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ mov(ip, Operand(map));
- __ cmp(r3, ip);
- __ b(ne, bailout());
+ // The loaded value is not known to be a smi.
+ clear_as_smi(destination());
}
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
- // Compile global variable accesses as load IC calls. The only live
- // registers are cp (context) and possibly r1 (this). Both are also saved
- // in the stack and cp is preserved by the call.
- __ ldr(ip, CodeGenerator::GlobalObject());
- __ push(ip);
- __ mov(r2, Operand(name));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- if (has_this_properties()) {
- // Restore this.
- EmitLoadReceiver(r1);
+void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+ LookupResult lookup;
+ info()->receiver()->Lookup(*name, &lookup);
+
+ ASSERT(lookup.holder() == *info()->receiver());
+ ASSERT(lookup.type() == FIELD);
+ Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
+ int index = lookup.GetFieldIndex() - map->inobject_properties();
+ int offset = index * kPointerSize;
+
+ // We will emit the write barrier unless the stored value is statically
+ // known to be a smi.
+ bool needs_write_barrier = !is_smi(accumulator0());
+
+ // Negative offsets are inobject properties.
+ if (offset < 0) {
+ offset += map->instance_size();
+ __ str(accumulator0(), FieldMemOperand(receiver_reg(), offset));
+ if (needs_write_barrier) {
+ // Preserve receiver from write barrier.
+ __ mov(scratch0(), receiver_reg());
+ }
+ } else {
+ offset += FixedArray::kHeaderSize;
+ __ ldr(scratch0(),
+ FieldMemOperand(receiver_reg(), JSObject::kPropertiesOffset));
+ __ str(accumulator0(), FieldMemOperand(scratch0(), offset));
+ }
+
+ if (needs_write_barrier) {
+ __ mov(scratch1(), Operand(offset));
+ __ RecordWrite(scratch0(), scratch1(), ip);
+ }
+
+ if (destination().is(accumulator1())) {
+ __ mov(accumulator1(), accumulator0());
+ if (is_smi(accumulator0())) {
+ set_as_smi(accumulator1());
+ } else {
+ clear_as_smi(accumulator1());
+ }
}
}
-void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
+ ASSERT(!destination().is(no_reg));
LookupResult lookup;
- receiver()->Lookup(*name, &lookup);
+ info()->receiver()->Lookup(*name, &lookup);
- ASSERT(lookup.holder() == *receiver());
+ ASSERT(lookup.holder() == *info()->receiver());
ASSERT(lookup.type() == FIELD);
- Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+ Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
int index = lookup.GetFieldIndex() - map->inobject_properties();
int offset = index * kPointerSize;
- // Negative offsets are inobject properties.
+ // Perform the load. Negative offsets are inobject properties.
if (offset < 0) {
offset += map->instance_size();
- __ mov(r2, r1); // Copy receiver for write barrier.
+ __ ldr(destination(), FieldMemOperand(receiver_reg(), offset));
} else {
offset += FixedArray::kHeaderSize;
- __ ldr(r2, FieldMemOperand(r1, JSObject::kPropertiesOffset));
+ __ ldr(scratch0(),
+ FieldMemOperand(receiver_reg(), JSObject::kPropertiesOffset));
+ __ ldr(destination(), FieldMemOperand(scratch0(), offset));
+ }
+
+ // The loaded value is not known to be a smi.
+ clear_as_smi(destination());
+}
+
+
+void FastCodeGenerator::EmitBitOr() {
+ if (is_smi(accumulator0()) && is_smi(accumulator1())) {
+ // If both operands are known to be a smi then there is no need to check
+ // the operands or result. There is no need to perform the operation in
+ // an effect context.
+ if (!destination().is(no_reg)) {
+ __ orr(destination(), accumulator1(), Operand(accumulator0()));
+ }
+ } else if (destination().is(no_reg)) {
+ // Result is not needed but do not clobber the operands in case of
+ // bailout.
+ __ orr(scratch0(), accumulator1(), Operand(accumulator0()));
+ __ BranchOnNotSmi(scratch0(), bailout());
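+    // Or-ing two values whose smi tag bits are clear leaves the tag bit
+    // clear, so a set tag bit in the result proves at least one input was
+    // a heap object.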
+ } else {
+ // Preserve the destination operand in a scratch register in case of
+ // bailout.
+ __ mov(scratch0(), destination());
+ __ orr(destination(), accumulator1(), Operand(accumulator0()));
+ __ BranchOnNotSmi(destination(), bailout());
}
- // Perform the store.
- __ str(r0, FieldMemOperand(r2, offset));
- __ mov(r3, Operand(offset));
- __ RecordWrite(r2, r3, ip);
+
+  // If we didn't bail out, the result (in fact, both inputs too) is known
+  // to be a smi.
+ set_as_smi(accumulator0());
+ set_as_smi(accumulator1());
}
-void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
- ASSERT(function_ == NULL);
+void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
ASSERT(info_ == NULL);
- function_ = fun;
- info_ = info;
+ info_ = compilation_info;
// Save the caller's frame pointer and set up our own.
Comment prologue_cmnt(masm(), ";; Prologue");
@@ -115,18 +179,42 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
// Note that we keep a live register reference to cp (context) at
// this point.
- // Receiver (this) is allocated to r1 if there are this properties.
- if (has_this_properties()) EmitReceiverMapCheck();
+ // Receiver (this) is allocated to a fixed register.
+ if (info()->has_this_properties()) {
+ Comment cmnt(masm(), ";; MapCheck(this)");
+ if (FLAG_print_ir) {
+ PrintF("MapCheck(this)\n");
+ }
+ ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
+ Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
+ Handle<Map> map(object->map());
+ EmitLoadReceiver();
+ __ CheckMap(receiver_reg(), scratch0(), map, bailout(), false);
+ }
- VisitStatements(fun->body());
+ // If there is a global variable access check if the global object is the
+ // same as at lazy-compilation time.
+ if (info()->has_globals()) {
+ Comment cmnt(masm(), ";; MapCheck(GLOBAL)");
+ if (FLAG_print_ir) {
+ PrintF("MapCheck(GLOBAL)\n");
+ }
+ ASSERT(info()->has_global_object());
+ Handle<Map> map(info()->global_object()->map());
+ __ ldr(scratch0(), CodeGenerator::GlobalObject());
+ __ CheckMap(scratch0(), scratch1(), map, bailout(), true);
+ }
+
+ VisitStatements(function()->body());
Comment return_cmnt(masm(), ";; Return(<undefined>)");
+ if (FLAG_print_ir) {
+ PrintF("Return(<undefined>)\n");
+ }
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-
- Comment epilogue_cmnt(masm(), ";; Epilogue");
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
- int32_t sp_delta = (fun->scope()->num_parameters() + 1) * kPointerSize;
+ int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
__ add(sp, sp, Operand(sp_delta));
__ Jump(lr);
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 9f240dd82..489637380 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -52,12 +52,13 @@ namespace internal {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-arm.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
- function_ = fun;
- SetFunctionPosition(fun);
+void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+ ASSERT(info_ == NULL);
+ info_ = info;
+ SetFunctionPosition(function());
if (mode == PRIMARY) {
- int locals_count = fun->scope()->num_stack_slots();
+ int locals_count = scope()->num_stack_slots();
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
if (locals_count > 0) {
@@ -77,7 +78,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
bool function_in_register = true;
// Possibly allocate a local context.
- if (fun->scope()->num_heap_slots() > 0) {
+ if (scope()->num_heap_slots() > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is in r1.
__ push(r1);
@@ -87,9 +88,9 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
// passed to us. It's saved in the stack and kept live in cp.
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
- int num_parameters = fun->scope()->num_parameters();
+ int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
- Slot* slot = fun->scope()->parameter(i)->slot();
+ Slot* slot = scope()->parameter(i)->slot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -107,7 +108,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
}
}
- Variable* arguments = fun->scope()->arguments()->AsVariable();
+ Variable* arguments = scope()->arguments()->AsVariable();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
@@ -118,9 +119,10 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
__ mov(r3, r1);
}
// Receiver is just before the parameters on the caller's stack.
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
- fun->num_parameters() * kPointerSize));
- __ mov(r1, Operand(Smi::FromInt(fun->num_parameters())));
+ int offset = scope()->num_parameters() * kPointerSize;
+ __ add(r2, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
__ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
// Arguments to ArgumentsAccessStub:
@@ -133,7 +135,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
__ mov(r3, r0);
Move(arguments->slot(), r0, r1, r2);
Slot* dot_arguments_slot =
- fun->scope()->arguments_shadow()->AsVariable()->slot();
+ scope()->arguments_shadow()->AsVariable()->slot();
Move(dot_arguments_slot, r3, r1, r2);
}
}
@@ -155,7 +157,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
}
{ Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(fun->scope()->declarations());
+ VisitDeclarations(scope()->declarations());
}
if (FLAG_trace) {
@@ -164,7 +166,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
{ Comment cmnt(masm_, "[ Body");
ASSERT(loop_depth() == 0);
- VisitStatements(fun->body());
+ VisitStatements(function()->body());
ASSERT(loop_depth() == 0);
}
@@ -173,7 +175,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
// body.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
}
- EmitReturnSequence(function_->end_position());
+ EmitReturnSequence(function()->end_position());
}
@@ -196,7 +198,7 @@ void FullCodeGenerator::EmitReturnSequence(int position) {
// Calculate the exact length of the return sequence and make sure that
// the constant pool is not emitted inside of the return sequence.
- int num_parameters = function_->scope()->num_parameters();
+ int num_parameters = scope()->num_parameters();
int32_t sp_delta = (num_parameters + 1) * kPointerSize;
int return_sequence_length = Assembler::kJSReturnSequenceLength;
if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
@@ -512,7 +514,7 @@ MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
return MemOperand(fp, SlotOffset(slot));
case Slot::CONTEXT: {
int context_chain_length =
- function_->scope()->ContextChainLength(slot->var()->scope());
+ scope()->ContextChainLength(slot->var()->scope());
__ LoadContext(scratch, context_chain_length);
return CodeGenerator::ContextOperand(scratch, slot->index());
}
@@ -572,7 +574,7 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
// this specific context.
// The variable in the decl always resides in the current context.
- ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+ ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
if (FLAG_debug_code) {
// Check if we have the correct context pointer.
__ ldr(r1,
@@ -652,7 +654,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
// The context is the first argument.
__ mov(r1, Operand(pairs));
- __ mov(r0, Operand(Smi::FromInt(is_eval_ ? 1 : 0)));
+ __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
__ stm(db_w, sp, cp.bit() | r1.bit() | r0.bit());
__ CallRuntime(Runtime::kDeclareGlobals, 3);
// Return value is ignored.
@@ -664,7 +666,7 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
// Build the function boilerplate and instantiate it.
Handle<JSFunction> boilerplate =
- Compiler::BuildBoilerplate(expr, script_, this);
+ Compiler::BuildBoilerplate(expr, script(), this);
if (HasStackOverflow()) return;
ASSERT(boilerplate->IsBoilerplate());
@@ -814,9 +816,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (key->handle()->IsSymbol()) {
VisitForValue(value, kAccumulator);
__ mov(r2, Operand(key->handle()));
+ __ ldr(r1, MemOperand(sp));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
- // StoreIC leaves the receiver on the stack.
break;
}
// Fall through.
@@ -905,6 +907,92 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ Comment cmnt(masm_, "[ Assignment");
+ ASSERT(expr->op() != Token::INIT_CONST);
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->target()->AsProperty();
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
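+  // For example, "obj.x += y" takes the NAMED_PROPERTY path below: the
+  // receiver is evaluated, the old value of obj.x is loaded and pushed,
+  // "y" is evaluated into the accumulator, ADD is applied, and the result
+  // is stored through the IC.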
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ if (expr->is_compound()) {
+ // We need the receiver both on the stack and in the accumulator.
+ VisitForValue(prop->obj(), kAccumulator);
+ __ push(result_register());
+ } else {
+ VisitForValue(prop->obj(), kStack);
+ }
+ break;
+ case KEYED_PROPERTY:
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+ break;
+ }
+
+  // If we have a compound assignment: Get the value of the LHS expression
+  // and store it on top of the stack.
+ if (expr->is_compound()) {
+ Location saved_location = location_;
+ location_ = kStack;
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
+ Expression::kValue);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(prop);
+ __ push(result_register());
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(prop);
+ __ push(result_register());
+ break;
+ }
+ location_ = saved_location;
+ }
+
+ // Evaluate RHS expression.
+ Expression* rhs = expr->value();
+ VisitForValue(rhs, kAccumulator);
+
+ // If we have a compound assignment: Apply operator.
+ if (expr->is_compound()) {
+ Location saved_location = location_;
+ location_ = kAccumulator;
+ EmitBinaryOp(expr->binary_op(), Expression::kValue);
+ location_ = saved_location;
+ }
+
+ // Record source position before possible IC call.
+ SetSourcePosition(expr->position());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ context_);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
+}
+
+
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
@@ -943,21 +1031,17 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
ASSERT(!var->is_this());
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in r0, variable name in
- // r2, and the global object on the stack.
+ // r2, and the global object in r1.
__ mov(r2, Operand(var->name()));
- __ ldr(ip, CodeGenerator::GlobalObject());
- __ push(ip);
+ __ ldr(r1, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
- // Overwrite the global object on the stack with the result if needed.
- DropAndApply(1, context, r0);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
__ push(result_register()); // Value.
__ mov(r1, Operand(var->name()));
__ stm(db_w, sp, cp.bit() | r1.bit()); // Context and name.
__ CallRuntime(Runtime::kStoreContextSlot, 3);
- Apply(context, r0);
} else if (var->slot() != NULL) {
Slot* slot = var->slot();
@@ -984,13 +1068,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
UNREACHABLE();
break;
}
- Apply(context, result_register());
} else {
// Variables rewritten as properties are not treated as variables in
// assignments.
UNREACHABLE();
}
+ Apply(context, result_register());
}
@@ -1014,6 +1098,12 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+ if (expr->ends_initialization_block()) {
+ __ ldr(r1, MemOperand(sp));
+ } else {
+ __ pop(r1);
+ }
+
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
@@ -1024,9 +1114,10 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ push(ip);
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(r0);
+ DropAndApply(1, context_, r0);
+ } else {
+ Apply(context_, r0);
}
-
- DropAndApply(1, context_, r0);
}
@@ -1085,7 +1176,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
}
void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> ignored,
+ Handle<Object> name,
RelocInfo::Mode mode) {
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
@@ -1093,16 +1184,16 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
for (int i = 0; i < arg_count; i++) {
VisitForValue(args->at(i), kStack);
}
+ __ mov(r2, Operand(name));
// Record source position for debugger.
SetSourcePosition(expr->position());
// Call the IC initialization code.
- Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
- NOT_IN_LOOP);
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
__ Call(ic, mode);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- DropAndApply(1, context_, r0);
+ Apply(context_, r0);
}
@@ -1119,7 +1210,6 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
__ CallStub(&stub);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
DropAndApply(1, context_, r0);
}
@@ -1133,11 +1223,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Call to the identifier 'eval'.
UNREACHABLE();
} else if (var != NULL && !var->is_this() && var->is_global()) {
- // Call to a global variable.
- __ mov(r1, Operand(var->name()));
- // Push global object as receiver for the call IC lookup.
+ // Push global object as receiver for the call IC.
__ ldr(r0, CodeGenerator::GlobalObject());
- __ stm(db_w, sp, r1.bit() | r0.bit());
+ __ push(r0);
EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
@@ -1149,8 +1237,6 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Literal* key = prop->key()->AsLiteral();
if (key != NULL && key->handle()->IsSymbol()) {
// Call to a named property, use call IC.
- __ mov(r0, Operand(key->handle()));
- __ push(r0);
VisitForValue(prop->obj(), kStack);
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
@@ -1236,10 +1322,9 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
// Prepare for calling JS runtime function.
- __ mov(r1, Operand(expr->name()));
__ ldr(r0, CodeGenerator::GlobalObject());
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset));
- __ stm(db_w, sp, r1.bit() | r0.bit());
+ __ push(r0);
}
// Push the arguments ("left-to-right").
@@ -1250,18 +1335,17 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
// Call the JS runtime function.
+ __ mov(r2, Operand(expr->name()));
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
NOT_IN_LOOP);
__ Call(ic, RelocInfo::CODE_TARGET);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- DropAndApply(1, context_, r0);
} else {
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
- Apply(context_, r0);
}
+ Apply(context_, r0);
}
@@ -1546,15 +1630,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case NAMED_PROPERTY: {
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+ __ pop(r1);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
if (expr->is_postfix()) {
- __ Drop(1); // Result is on the stack under the receiver.
if (context_ != Expression::kEffect) {
ApplyTOS(context_);
}
} else {
- DropAndApply(1, context_, r0);
+ Apply(context_, r0);
}
break;
}
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index bae1e9679..7ddb3386e 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -59,7 +59,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// r3 - used as temporary and to hold the capacity of the property
// dictionary.
//
- // r2 - holds the name of the property and is unchanges.
+ // r2 - holds the name of the property and is unchanged.
Label done;
@@ -190,7 +190,7 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
__ ldr(r0, MemOperand(sp, 0));
- StubCompiler::GenerateLoadStringLength2(masm, r0, r1, r3, &miss);
+ StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss);
// Cache miss: Jump to runtime.
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -219,14 +219,13 @@ Object* CallIC_Miss(Arguments args);
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
- // -- lr: return address
+ // -- r2 : name
+ // -- lr : return address
// -----------------------------------
Label number, non_number, non_string, boolean, probe, miss;
// Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
- // Get the name of the function from the stack; 1 ~ receiver.
- __ ldr(r2, MemOperand(sp, (argc + 1) * kPointerSize));
// Probe the stub cache.
Code::Flags flags =
@@ -301,9 +300,9 @@ static void GenerateNormalHelper(MacroAssembler* masm,
// Patch the receiver with the global proxy if necessary.
if (is_global_object) {
- __ ldr(r2, MemOperand(sp, argc * kPointerSize));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
- __ str(r2, MemOperand(sp, argc * kPointerSize));
+ __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+ __ str(r0, MemOperand(sp, argc * kPointerSize));
}
// Invoke the function.
@@ -314,14 +313,13 @@ static void GenerateNormalHelper(MacroAssembler* masm,
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
- // -- lr: return address
+ // -- r2 : name
+ // -- lr : return address
// -----------------------------------
Label miss, global_object, non_global_object;
// Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
- // Get the name of the function from the stack; 1 ~ receiver.
- __ ldr(r2, MemOperand(sp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
__ tst(r1, Operand(kSmiTagMask));
@@ -374,18 +372,17 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
- // -- lr: return address
+ // -- r2 : name
+ // -- lr : return address
// -----------------------------------
// Get the receiver of the function from the stack.
- __ ldr(r2, MemOperand(sp, argc * kPointerSize));
- // Get the name of the function to call from the stack.
- __ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
+ __ ldr(r3, MemOperand(sp, argc * kPointerSize));
__ EnterInternalFrame();
// Push the receiver and the name of the function.
- __ stm(db_w, sp, r1.bit() | r2.bit());
+ __ stm(db_w, sp, r2.bit() | r3.bit());
// Call the entry.
__ mov(r0, Operand(2));
@@ -438,7 +435,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
StubCache::GenerateProbe(masm, flags, r0, r2, r3, no_reg);
// Cache miss: Jump to runtime.
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+ GenerateMiss(masm);
}
@@ -482,16 +479,11 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Cache miss: Restore receiver from stack and jump to runtime.
__ bind(&miss);
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+ GenerateMiss(masm);
}
void LoadIC::GenerateMiss(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
-}
-
-
-void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -502,7 +494,7 @@ void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
__ stm(db_w, sp, r2.bit() | r3.bit());
// Perform tail call to the entry.
- __ TailCallRuntime(f, 2, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kLoadIC_Miss)), 2, 1);
}
@@ -530,11 +522,20 @@ Object* KeyedLoadIC_Miss(Arguments args);
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[4] : receiver
+ // -----------------------------------
+
+ __ ldm(ia, sp, r2.bit() | r3.bit());
+ __ stm(db_w, sp, r2.bit() | r3.bit());
+
+ __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedLoadIC_Miss)), 2, 1);
}
-void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- sp[0] : key
@@ -544,7 +545,7 @@ void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r2.bit() | r3.bit());
- __ TailCallRuntime(f, 2, 1);
+ __ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2, 1);
}
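The ldm/stm pair above is a recurring idiom in these miss handlers: it reloads the two stack arguments and pushes fresh copies to serve as the tail-called runtime entry's arguments, leaving the original slots untouched. A sketch of its effect, assuming the stack layout from the state comment:

    //   ldm(ia, sp, r2.bit() | r3.bit());    // r2 <- sp[0] (key), r3 <- sp[4] (receiver)
    //   stm(db_w, sp, r2.bit() | r3.bit());  // push copies: new sp[0] = key, new sp[4] = receiver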
@@ -558,17 +559,11 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Get the key and receiver object from the stack.
__ ldm(ia, sp, r0.bit() | r1.bit());
- // Check that the key is a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &slow);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
- // Check that the object isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &slow);
+ // Check that the object isn't a smi.
+ __ BranchOnSmi(r1, &slow);
// Get the map of the receiver.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-
// Check bit field.
__ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
__ tst(r3, Operand(kSlowCaseBitFieldMask));
@@ -582,6 +577,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(r2, Operand(JS_OBJECT_TYPE));
__ b(lt, &slow);
+ // Check that the key is a smi.
+ __ BranchOnNotSmi(r0, &slow);
+ __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+
// Get the elements array of the object.
__ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
@@ -597,10 +596,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Slow case: Push extra copies of the arguments (2).
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1);
- __ ldm(ia, sp, r0.bit() | r1.bit());
- __ stm(db_w, sp, r0.bit() | r1.bit());
- // Do tail-call to runtime routine.
- __ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2, 1);
+ GenerateRuntimeGetProperty(masm);
// Fast case: Do the load.
__ bind(&fast);
@@ -634,8 +630,47 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
}
-void KeyedStoreIC::Generate(MacroAssembler* masm,
- const ExternalReference& f) {
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[4] : receiver
+ // -----------------------------------
+ Label slow;
+
+ // Get the key and receiver object from the stack.
+ __ ldm(ia, sp, r0.bit() | r1.bit());
+
+ // Check that the receiver isn't a smi.
+ __ BranchOnSmi(r1, &slow);
+
+ // Check that the key is a smi.
+ __ BranchOnNotSmi(r0, &slow);
+
+ // Get the map of the receiver.
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+
+ // Check that it has indexed interceptor and access checks
+ // are not enabled for this object.
+ __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ and_(r3, r3, Operand(kSlowCaseBitFieldMask));
+ __ cmp(r3, Operand(1 << Map::kHasIndexedInterceptor));
+ __ b(ne, &slow);
+
+ // Everything is fine, call runtime.
+ __ push(r1); // receiver
+ __ push(r0); // key
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(ExternalReference(
+ IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
+
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
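The bit-field test above can be read as a single equality, sketched in C terms with the bit names used in the surrounding code:

    // (bit_field & kSlowCaseBitFieldMask) == (1 << Map::kHasIndexedInterceptor)
    // i.e. the map has an indexed interceptor and none of the other
    // slow-case bits (such as access checks) are set.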
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- lr : return address
@@ -646,7 +681,21 @@ void KeyedStoreIC::Generate(MacroAssembler* masm,
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
- __ TailCallRuntime(f, 3, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedStoreIC_Miss)), 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- r0 : value
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[1] : receiver
+ // -----------------------------------
+ __ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit());
+
+ __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
}
@@ -701,12 +750,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ b(lo, &fast);
- // Slow case: Push extra copies of the arguments (3).
+ // Slow case:
__ bind(&slow);
- __ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object
- __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit());
- // Do tail-call to runtime routine.
- __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+ GenerateRuntimeSetProperty(masm);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -777,33 +823,15 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
}
-void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- lr : return address
- // -- sp[0] : key
- // -- sp[1] : receiver
- // ----------- S t a t e -------------
-
- __ ldm(ia, sp, r2.bit() | r3.bit());
- __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
-
- // Perform tail call to the entry.
- __ TailCallRuntime(
- ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
-}
-
-
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
+ // -- r1 : receiver
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- __ ldr(r1, MemOperand(sp));
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC);
@@ -814,36 +842,66 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
}
-void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
+ // -- r1 : receiver
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
// -----------------------------------
- __ ldr(r3, MemOperand(sp)); // copy receiver
- __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
+ __ push(r1);
+ __ stm(db_w, sp, r2.bit() | r0.bit());
// Perform tail call to the entry.
- __ TailCallRuntime(
- ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
}
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
+void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
+ // -- r1 : receiver
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
// -----------------------------------
+ //
+ // This accepts as a receiver anything JSObject::SetElementsLength accepts
+ // (currently anything except external and pixel arrays, which means
+ // anything with elements of FixedArray type), but is currently restricted
+ // to JSArray.
+ // The value must be a number; for now only smis are accepted, as they are
+ // the most common case.
- __ ldr(r3, MemOperand(sp)); // copy receiver
- __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
+ Label miss;
- // Perform tail call to the entry.
- __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
+ Register receiver = r1;
+ Register value = r0;
+ Register scratch = r3;
+
+ // Check that the receiver isn't a smi.
+ __ BranchOnSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
+ __ b(ne, &miss);
+
+ // Check that elements are FixedArray.
+ __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
+ __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
+ __ b(ne, &miss);
+
+ // Check that value is a smi.
+ __ BranchOnNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ push(receiver);
+ __ push(value);
+
+ __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_ArrayLength)), 2, 1);
+
+ __ bind(&miss);
+
+ GenerateMiss(masm);
}
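Hypothetical inputs for the two paths above (illustrative only, not taken from this change):

    //   var a = [1, 2, 3];  a.length = 7;  // JSArray, FixedArray elements, smi value -> fast tail call
    //   a.length = 1.5;                    // non-smi value                           -> miss
    //   a non-JSArray receiver, or one with external/pixel elements                   -> miss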
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index b39404e7f..b249d696d 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -37,7 +37,6 @@ namespace internal {
MacroAssembler::MacroAssembler(void* buffer, int size)
: Assembler(buffer, size),
- unresolved_(0),
generating_stub_(false),
allow_stub_calls_(true),
code_object_(Heap::undefined_value()) {
@@ -196,7 +195,7 @@ void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond) {
- ldr(destination, MemOperand(r10, index << kPointerSizeLog2), cond);
+ ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond);
}
@@ -331,14 +330,10 @@ void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
// Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
- mov(fp, Operand(sp)); // setup new frame pointer
+ mov(fp, Operand(sp)); // Set up new frame pointer.
- if (mode == ExitFrame::MODE_DEBUG) {
- mov(ip, Operand(Smi::FromInt(0)));
- } else {
- mov(ip, Operand(CodeObject()));
- }
- push(ip);
+ mov(ip, Operand(CodeObject()));
+ push(ip); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
@@ -550,6 +545,21 @@ void MacroAssembler::InvokeFunction(Register fun,
}
+void MacroAssembler::InvokeFunction(JSFunction* function,
+ const ParameterCount& actual,
+ InvokeFlag flag) {
+ ASSERT(function->is_compiled());
+
+ // Get the function and setup the context.
+ mov(r1, Operand(Handle<JSFunction>(function)));
+ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+ // Invoke the cached code.
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+}
+
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::SaveRegistersToMemory(RegList regs) {
ASSERT((regs & ~kJSCallerSaved) == 0);
@@ -608,6 +618,15 @@ void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
}
}
}
+
+
+void MacroAssembler::DebugBreak() {
+ ASSERT(allow_stub_calls());
+ mov(r0, Operand(0));
+ mov(r1, Operand(ExternalReference(Runtime::kDebugBreak)));
+ CEntryStub ces(1);
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
#endif
@@ -940,6 +959,113 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object,
}
+void MacroAssembler::AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
+ add(scratch1, scratch1,
+ Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
+ // AllocateInNewSpace expects the size in words, so we can round down
+ // to kObjectAlignment and divide by kPointerSize in the same shift.
+ ASSERT_EQ(kPointerSize, kObjectAlignmentMask + 1);
+ mov(scratch1, Operand(scratch1, ASR, kPointerSizeLog2));
+
+ // Allocate two-byte string in new space.
+ AllocateInNewSpace(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ LoadRoot(scratch1, Heap::kStringMapRootIndex);
+ str(length, FieldMemOperand(result, String::kLengthOffset));
+ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ mov(scratch2, Operand(String::kEmptyHashField));
+ str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+}
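A minimal host-side sketch of the size computation above, assuming the 32-bit ARM values kPointerSize == 4 and a three-word SeqTwoByteString header (both assumptions, not stated in this diff):

    static int TwoByteStringSizeInWords(int length) {
      const int kPointerSize = 4;
      const int kObjectAlignmentMask = kPointerSize - 1;
      const int kHeaderSize = 3 * kPointerSize;  // map, length, hash
      // Adding the alignment mask before shifting rounds the size up to words.
      return (2 * length + kHeaderSize + kObjectAlignmentMask) >> 2;
    }
    // e.g. length == 3 chars: 6 + 12 + 3 = 21 bytes -> 5 words (20 bytes allocated).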
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT(kCharSize == 1);
+ add(scratch1, length,
+ Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
+ // AllocateInNewSpace expects the size in words, so we can round down
+ // to kObjectAlignment and divide by kPointerSize in the same shift.
+ ASSERT_EQ(kPointerSize, kObjectAlignmentMask + 1);
+ mov(scratch1, Operand(scratch1, ASR, kPointerSizeLog2));
+
+ // Allocate ASCII string in new space.
+ AllocateInNewSpace(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ LoadRoot(scratch1, Heap::kAsciiStringMapRootIndex);
+ str(length, FieldMemOperand(result, String::kLengthOffset));
+ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ mov(scratch2, Operand(String::kEmptyHashField));
+ str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ AllocateInNewSpace(ConsString::kSize / kPointerSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+ LoadRoot(scratch1, Heap::kConsStringMapRootIndex);
+ mov(scratch2, Operand(String::kEmptyHashField));
+ str(length, FieldMemOperand(result, String::kLengthOffset));
+ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ AllocateInNewSpace(ConsString::kSize / kPointerSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+ LoadRoot(scratch1, Heap::kConsAsciiStringMapRootIndex);
+ mov(scratch2, Operand(String::kEmptyHashField));
+ str(length, FieldMemOperand(result, String::kLengthOffset));
+ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+}
+
+
void MacroAssembler::CompareObjectType(Register function,
Register map,
Register type_reg,
@@ -957,6 +1083,21 @@ void MacroAssembler::CompareInstanceType(Register map,
}
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ bool is_heap_object) {
+ if (!is_heap_object) {
+ BranchOnSmi(obj, fail);
+ }
+ ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ mov(ip, Operand(map));
+ cmp(scratch, ip);
+ b(ne, fail);
+}
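A hypothetical call site for the new CheckMap helper (receiver_map and miss are assumed to exist in the surrounding stub):

    //   __ CheckMap(r1, r3, receiver_map, &miss, false);  // includes the smi check
    // Passing true for is_heap_object skips the BranchOnSmi when the receiver
    // is already known to be a heap object.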
+
+
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
@@ -1010,10 +1151,17 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
}
+void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
+ ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+}
+
+
void MacroAssembler::StubReturn(int argc) {
ASSERT(argc >= 1 && generating_stub());
- if (argc > 1)
+ if (argc > 1) {
add(sp, sp, Operand((argc - 1) * kPointerSize));
+ }
Ret();
}
@@ -1037,6 +1185,18 @@ void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
}
+void MacroAssembler::GetLeastBitsFromSmi(Register dst,
+ Register src,
+ int num_least_bits) {
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ ubfx(dst, src, Operand(kSmiTagSize), Operand(num_least_bits - 1));
+ } else {
+ mov(dst, Operand(src, ASR, kSmiTagSize));
+ and_(dst, dst, Operand((1 << num_least_bits) - 1));
+ }
+}
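A host-side model of the non-ARMv7 path, assuming the 32-bit smi encoding kSmiTag == 0, kSmiTagSize == 1 (a sketch, not V8 API):

    static int GetLeastBitsFromSmi(int smi, int num_least_bits) {
      int untagged = smi >> 1;                        // ASR kSmiTagSize
      return untagged & ((1 << num_least_bits) - 1);  // keep the low bits
    }
    // The smi encoding of 13 is 26; GetLeastBitsFromSmi(26, 3) == 13 & 7 == 5.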
+
+
void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// All parameters are on the stack. r0 has the return value after call.
@@ -1064,6 +1224,16 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
}
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+ int num_arguments) {
+ mov(r0, Operand(num_arguments));
+ mov(r1, Operand(ext));
+
+ CEntryStub stub(1);
+ CallStub(&stub);
+}
+
+
void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
int num_arguments,
int result_size) {
@@ -1087,58 +1257,28 @@ void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
}
-Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
- bool* resolved) {
- // Contract with compiled functions is that the function is passed in r1.
- int builtins_offset =
- JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
- ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- ldr(r1, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
- ldr(r1, FieldMemOperand(r1, builtins_offset));
-
- return Builtins::GetCode(id, resolved);
-}
-
-
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags) {
- bool resolved;
- Handle<Code> code = ResolveBuiltin(id, &resolved);
-
+ GetBuiltinEntry(r2, id);
if (flags == CALL_JS) {
- Call(code, RelocInfo::CODE_TARGET);
+ Call(r2);
} else {
ASSERT(flags == JUMP_JS);
- Jump(code, RelocInfo::CODE_TARGET);
- }
-
- if (!resolved) {
- const char* name = Builtins::GetName(id);
- int argc = Builtins::GetArgumentsCount(id);
- uint32_t flags =
- Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsUseCodeObject::encode(false);
- Unresolved entry = { pc_offset() - kInstrSize, flags, name };
- unresolved_.Add(entry);
+ Jump(r2);
}
}
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- bool resolved;
- Handle<Code> code = ResolveBuiltin(id, &resolved);
-
- mov(target, Operand(code));
- if (!resolved) {
- const char* name = Builtins::GetName(id);
- int argc = Builtins::GetArgumentsCount(id);
- uint32_t flags =
- Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsUseCodeObject::encode(true);
- Unresolved entry = { pc_offset() - kInstrSize, flags, name };
- unresolved_.Add(entry);
- }
-
+ // Load the JavaScript builtin function from the builtins object.
+ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ ldr(r1, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
+ int builtins_offset =
+ JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
+ ldr(r1, FieldMemOperand(r1, builtins_offset));
+ // Load the code entry point from the function into the target register.
+ ldr(target, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ ldr(target, FieldMemOperand(target, SharedFunctionInfo::kCodeOffset));
add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
}
@@ -1238,6 +1378,26 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::JumpIfNotBothSmi(Register reg1,
+ Register reg2,
+ Label* on_not_both_smi) {
+ ASSERT_EQ(0, kSmiTag);
+ tst(reg1, Operand(kSmiTagMask));
+ tst(reg2, Operand(kSmiTagMask), eq);
+ b(ne, on_not_both_smi);
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register reg1,
+ Register reg2,
+ Label* on_either_smi) {
+ ASSERT_EQ(0, kSmiTag);
+ tst(reg1, Operand(kSmiTagMask));
+ tst(reg2, Operand(kSmiTagMask), ne);
+ b(eq, on_either_smi);
+}
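Both helpers fold two smi checks into one flag-setting sequence via the conditionally executed second tst; their effect in C terms, with kSmiTag == 0 assumed:

    // JumpIfNotBothSmi: taken unless both tag bits are clear.
    //   if (((reg1 | reg2) & kSmiTagMask) != 0) goto on_not_both_smi;
    // JumpIfEitherSmi: taken if at least one tag bit is clear.
    //   if ((reg1 & kSmiTagMask) == 0 || (reg2 & kSmiTagMask) == 0) goto on_either_smi;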
+
+
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first,
Register second,
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index efc5bfae7..98cea1638 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -33,10 +33,18 @@
namespace v8 {
namespace internal {
+// ----------------------------------------------------------------------------
+// Static helper functions
+
+// Generate a MemOperand for loading a field from an object.
+static inline MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
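For example (illustrative), loading an object's map through its tagged pointer:

    //   __ ldr(map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // The offset - kHeapObjectTag adjustment compensates for the low tag bit
    // that is set on heap object pointers.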
+
// Give alias names to registers
const Register cp = { 8 }; // JavaScript context pointer
-
+const Register roots = { 10 }; // Roots array pointer.
enum InvokeJSFlags {
CALL_JS,
@@ -49,14 +57,7 @@ class MacroAssembler: public Assembler {
public:
MacroAssembler(void* buffer, int size);
- // ---------------------------------------------------------------------------
- // Low-level helpers for compiler
-
- // Jump, Call, and Ret pseudo instructions implementing inter-working
- private:
- void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
- void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
- public:
+ // Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
@@ -134,6 +135,10 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual,
InvokeFlag flag);
+ void InvokeFunction(JSFunction* function,
+ const ParameterCount& actual,
+ InvokeFlag flag);
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
@@ -145,6 +150,7 @@ class MacroAssembler: public Assembler {
void CopyRegistersFromStackToMemory(Register base,
Register scratch,
RegList regs);
+ void DebugBreak();
#endif
// ---------------------------------------------------------------------------
@@ -209,6 +215,31 @@ class MacroAssembler: public Assembler {
// allocation is undone.
void UndoAllocationInNewSpace(Register object, Register scratch);
+
+ void AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+
// ---------------------------------------------------------------------------
// Support functions.
@@ -243,6 +274,29 @@ class MacroAssembler: public Assembler {
Register type_reg,
InstanceType type);
+
+ // Check if the map of an object is equal to a specified map and
+ // branch to label if not. Skip the smi check if not required
+ // (object is known to be a heap object)
+ void CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ bool is_heap_object);
+
+ // Load and check the instance type of an object for being a string.
+ // Loads the type into the second argument register.
+ // Returns a condition that holds if the object is a string.
+ Condition IsObjectStringType(Register obj,
+ Register type) {
+ ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+ ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+ tst(type, Operand(kIsNotStringMask));
+ ASSERT_EQ(0, kStringTag);
+ return eq;
+ }
+
+
inline void BranchOnSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
b(eq, smi_label);
@@ -257,6 +311,9 @@ class MacroAssembler: public Assembler {
// occurred.
void IllegalOperation(int num_arguments);
+ // Get the num_least_bits least significant bits of a smi into dst.
+ void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+
// Uses VFP instructions to Convert a Smi to a double.
void IntegerToDoubleConversionWithVFP3(Register inReg,
Register outHighReg,
@@ -269,6 +326,9 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub, Condition cond = al);
+ // Tail call a code stub (jump).
+ void TailCallStub(CodeStub* stub, Condition cond = al);
+
// Return from a code stub after popping its arguments.
void StubReturn(int argc);
@@ -279,6 +339,10 @@ class MacroAssembler: public Assembler {
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
+ // Convenience function: call an external reference.
+ void CallExternalReference(const ExternalReference& ext,
+ int num_arguments);
+
// Tail call of a runtime routine (jump).
// Like JumpToRuntime, but also takes care of passing the number
// of parameters.
@@ -297,13 +361,6 @@ class MacroAssembler: public Assembler {
// setup the function in r1.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
- struct Unresolved {
- int pc;
- uint32_t flags; // see Bootstrapper::FixupFlags decoders/encoders.
- const char* name;
- };
- List<Unresolved>* unresolved() { return &unresolved_; }
-
Handle<Object> CodeObject() { return code_object_; }
@@ -338,6 +395,14 @@ class MacroAssembler: public Assembler {
bool allow_stub_calls() { return allow_stub_calls_; }
// ---------------------------------------------------------------------------
+ // Smi utilities
+
+ // Jump if either of the registers contain a non-smi.
+ void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
+ // Jump if either of the registers contain a smi.
+ void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+
+ // ---------------------------------------------------------------------------
// String utilities
// Checks if both objects are sequential ASCII strings and jumps to label
@@ -357,11 +422,8 @@ class MacroAssembler: public Assembler {
Label* not_flat_ascii_strings);
private:
- List<Unresolved> unresolved_;
- bool generating_stub_;
- bool allow_stub_calls_;
- Handle<Object> code_object_; // This handle will be patched with the code
- // object on installation.
+ void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+ void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
@@ -371,21 +433,14 @@ class MacroAssembler: public Assembler {
Label* done,
InvokeFlag flag);
- // Prepares for a call or jump to a builtin by doing two things:
- // 1. Emits code that fetches the builtin's function object from the context
- // at runtime, and puts it in the register rdi.
- // 2. Fetches the builtin's code object, and returns it in a handle, at
- // compile time, so that later code can emit instructions to jump or call
- // the builtin directly. If the code object has not yet been created, it
- // returns the builtin code object for IllegalFunction, and sets the
- // output parameter "resolved" to false. Code that uses the return value
- // should then add the address and the builtin name to the list of fixups
- // called unresolved_, which is fixed up by the bootstrapper.
- Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
-
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
+
+ bool generating_stub_;
+ bool allow_stub_calls_;
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;
};
@@ -421,12 +476,6 @@ class CodePatcher {
// -----------------------------------------------------------------------------
// Static helper functions.
-// Generate a MemOperand for loading a field from an object.
-static inline MemOperand FieldMemOperand(Register object, int offset) {
- return MemOperand(object, offset - kHeapObjectTag);
-}
-
-
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index f5431512f..cee5aea0d 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1741,7 +1741,7 @@ void Simulator::DecodeType2(Instr* instr) {
void Simulator::DecodeType3(Instr* instr) {
- ASSERT(instr->Bit(4) == 0);
+ ASSERT(instr->Bits(6, 4) == 0x5 || instr->Bit(4) == 0);
int rd = instr->RdField();
int rn = instr->RnField();
int32_t rn_val = get_register(rn);
@@ -1768,10 +1768,26 @@ void Simulator::DecodeType3(Instr* instr) {
break;
}
case 3: {
- // Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
- addr = rn_val + shifter_operand;
- if (instr->HasW()) {
- set_register(rn, addr);
+ if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
+ uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
+ uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
+ uint32_t msbit = widthminus1 + lsbit;
+ if (msbit <= 31) {
+ uint32_t rm_val =
+ static_cast<uint32_t>(get_register(instr->RmField()));
+ uint32_t extr_val = rm_val << (31 - msbit);
+ extr_val = extr_val >> (31 - widthminus1);
+ set_register(instr->RdField(), extr_val);
+ } else {
+ UNREACHABLE();
+ }
+ return;
+ } else {
+ // Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+ addr = rn_val + shifter_operand;
+ if (instr->HasW()) {
+ set_register(rn, addr);
+ }
}
break;
}
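A host-side sketch of the ubfx semantics implemented above: shift left to discard the bits above msbit, then shift right logically to place lsbit at bit 0:

    static uint32_t Ubfx(uint32_t rm, uint32_t lsb, uint32_t width) {
      uint32_t msbit = lsb + width - 1;  // widthminus1 == width - 1
      uint32_t v = rm << (31 - msbit);
      return v >> (31 - (width - 1));
    }
    // e.g. Ubfx(0xABCD, 4, 8) == 0xBC (bits 4..11 of 0xABCD).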
@@ -1785,7 +1801,8 @@ void Simulator::DecodeType3(Instr* instr) {
uint8_t byte = ReadB(addr);
set_register(rd, byte);
} else {
- UNIMPLEMENTED();
+ uint8_t byte = get_register(rd);
+ WriteB(addr, byte);
}
} else {
if (instr->HasL()) {
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index d19a683dc..da7394215 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -189,8 +189,9 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
-// Generate code to check if an object is a string. If the object is
-// a string, the map's instance type is left in the scratch1 register.
+// Generate code to check if an object is a string. If the object is a
+// heap object, its map's instance type is left in the scratch1 register.
+// If this is not needed, scratch1 and scratch2 may be the same register.
static void GenerateStringCheck(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -215,18 +216,16 @@ static void GenerateStringCheck(MacroAssembler* masm,
// If the receiver object is not a string or a wrapped string object the
// execution continues at the miss label. The register containing the
// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label check_string, check_wrapper;
-
- __ bind(&check_string);
+void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ Label check_wrapper;
+
// Check if the object is a string leaving the instance type in the
// scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2,
- miss, &check_wrapper);
+ GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
// Load length directly from the string.
__ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
@@ -238,9 +237,12 @@ void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
__ cmp(scratch1, Operand(JS_VALUE_TYPE));
__ b(ne, miss);
- // Unwrap the value in place and check if the wrapped value is a string.
- __ ldr(receiver, FieldMemOperand(receiver, JSValue::kValueOffset));
- __ b(&check_string);
+ // Unwrap the value and check if the wrapped value is a string.
+ __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+ __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ Ret();
}
@@ -256,10 +258,10 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
// Generate StoreField code, value is passed in r0 register.
-// After executing generated code, the receiver_reg and name_reg
-// may be clobbered.
+// When leaving generated code after success, the receiver_reg and name_reg
+// may be clobbered. Upon branch to miss_label, the receiver and name
+// registers have their original values.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Builtins::Name storage_extend,
JSObject* object,
int index,
Map* transition,
@@ -292,11 +294,12 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
+ __ push(receiver_reg);
__ mov(r2, Operand(Handle<Map>(transition)));
- // Please note, if we implement keyed store for arm we need
- // to call the Builtins::KeyedStoreIC_ExtendStorage.
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_ExtendStorage));
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ __ stm(db_w, sp, r2.bit() | r0.bit());
+ __ TailCallRuntime(
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)),
+ 3, 1);
return;
}
@@ -373,7 +376,7 @@ static void GenerateCallFunction(MacroAssembler* masm,
// Check that the function really is a function.
__ BranchOnSmi(r1, miss);
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
__ b(ne, miss);
// Patch the receiver on the stack with the global proxy if
@@ -388,68 +391,6 @@ static void GenerateCallFunction(MacroAssembler* masm,
}
-static void GenerateCallConstFunction(MacroAssembler* masm,
- JSFunction* function,
- const ParameterCount& arguments) {
- ASSERT(function->is_compiled());
-
- // Get the function and setup the context.
- __ mov(r1, Operand(Handle<JSFunction>(function)));
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- __ InvokeCode(code, expected, arguments,
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
-}
-
-
-template <class Compiler>
-static void CompileLoadInterceptor(Compiler* compiler,
- StubCompiler* stub_compiler,
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ BranchOnSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- Register reg =
- stub_compiler->CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
-
- if (lookup->IsValid() && lookup->IsCacheable()) {
- compiler->CompileCacheable(masm,
- stub_compiler,
- receiver,
- reg,
- scratch1,
- scratch2,
- holder,
- lookup,
- name,
- miss);
- } else {
- compiler->CompileRegular(masm,
- receiver,
- reg,
- scratch2,
- holder,
- miss);
- }
-}
-
-
static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
@@ -500,7 +441,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
LookupResult* lookup,
String* name,
Label* miss_label) {
- AccessorInfo* callback = 0;
+ AccessorInfo* callback = NULL;
bool optimize = false;
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
@@ -523,9 +464,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
// Note: starting a frame here makes GC aware of pointers pushed below.
__ EnterInternalFrame();
- if (lookup->type() == CALLBACKS) {
- __ push(receiver);
- }
+ __ push(receiver);
__ push(holder);
__ push(name_);
@@ -546,10 +485,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
__ bind(&interceptor_failed);
__ pop(name_);
__ pop(holder);
-
- if (lookup->type() == CALLBACKS) {
- __ pop(receiver);
- }
+ __ pop(receiver);
__ LeaveInternalFrame();
@@ -621,108 +557,48 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
};
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(const ParameterCount& arguments, Register name)
- : arguments_(arguments), argc_(arguments.immediate()), name_(name) {}
-
- void CompileCacheable(MacroAssembler* masm,
- StubCompiler* stub_compiler,
- Register receiver,
- Register holder,
- Register scratch1,
- Register scratch2,
- JSObject* holder_obj,
- LookupResult* lookup,
- String* name,
- Label* miss_label) {
- JSFunction* function = 0;
- bool optimize = false;
- // So far the most popular case for failed interceptor is
- // CONSTANT_FUNCTION sitting below.
- if (lookup->type() == CONSTANT_FUNCTION) {
- function = lookup->GetConstantFunction();
- // JSArray holder is a special case for call constant function
- // (see the corresponding code).
- if (function->is_compiled() && !holder_obj->IsJSArray()) {
- optimize = true;
- }
- }
-
- if (!optimize) {
- CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
- return;
- }
-
- // Constant functions cannot sit on global object.
- ASSERT(!lookup->holder()->IsGlobalObject());
-
- __ EnterInternalFrame();
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- ASSERT(!r0.is(name_));
- ASSERT(!r0.is(scratch1));
- __ pop(name_); // Restore the name.
- __ pop(scratch1); // Restore the holder.
- __ LeaveInternalFrame();
-
- // Compare with no_interceptor_result_sentinel.
- __ LoadRoot(scratch2, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch2);
- Label invoke;
- __ b(ne, &invoke);
-
- stub_compiler->CheckPrototypes(holder_obj, scratch1,
- lookup->holder(), scratch1,
- scratch2,
- name,
- miss_label);
- GenerateCallConstFunction(masm, function, arguments_);
+static void CompileLoadInterceptor(LoadInterceptorCompiler* compiler,
+ StubCompiler* stub_compiler,
+ MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ ASSERT(holder->HasNamedInterceptor());
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
- __ bind(&invoke);
- }
+ // Check that the receiver isn't a smi.
+ __ BranchOnSmi(receiver, miss);
- void CompileRegular(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register scratch,
- JSObject* holder_obj,
- Label* miss_label) {
- __ EnterInternalFrame();
- // Save the name_ register across the call.
- __ push(name_);
+ // Check that the maps haven't changed.
+ Register reg =
+ stub_compiler->CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, name, miss);
- PushInterceptorArguments(masm,
+ if (lookup->IsProperty() && lookup->IsCacheable()) {
+ compiler->CompileCacheable(masm,
+ stub_compiler,
+ receiver,
+ reg,
+ scratch1,
+ scratch2,
+ holder,
+ lookup,
+ name,
+ miss);
+ } else {
+ compiler->CompileRegular(masm,
receiver,
+ reg,
+ scratch2,
holder,
- name_,
- holder_obj);
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
- __ mov(r0, Operand(5));
- __ mov(r1, Operand(ref));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-
- // Restore the name_ register.
- __ pop(name_);
- __ LeaveInternalFrame();
+ miss);
}
-
- private:
- const ParameterCount& arguments_;
- int argc_;
- Register name_;
-};
+}
#undef __
@@ -735,7 +611,11 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
Register holder_reg,
Register scratch,
String* name,
+ int save_at_depth,
Label* miss) {
+ // TODO(602): support object saving.
+ ASSERT(save_at_depth == kInvalidProtoDepth);
+
// Check that the maps haven't changed.
Register result =
masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
@@ -762,7 +642,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
object = JSObject::cast(object->GetPrototype());
}
- // Return the register containin the holder.
+ // Return the register containing the holder.
return result;
}
@@ -901,12 +781,13 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
}
-Object* CallStubCompiler::CompileCallField(Object* object,
+Object* CallStubCompiler::CompileCallField(JSObject* object,
JSObject* holder,
int index,
String* name) {
// ----------- S t a t e -------------
- // -- lr: return address
+ // -- r2 : name
+ // -- lr : return address
// -----------------------------------
Label miss;
@@ -919,8 +800,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
__ b(eq, &miss);
// Do the right check and compute the holder register.
- Register reg =
- CheckPrototypes(JSObject::cast(object), r0, holder, r3, r2, name, &miss);
+ Register reg = CheckPrototypes(object, r0, holder, r1, r3, name, &miss);
GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
GenerateCallFunction(masm(), object, arguments(), &miss);
@@ -941,7 +821,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
String* name,
CheckType check) {
// ----------- S t a t e -------------
- // -- lr: return address
+ // -- r2 : name
+ // -- lr : return address
// -----------------------------------
Label miss;
@@ -962,7 +843,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
switch (check) {
case RECEIVER_MAP_CHECK:
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss);
+ CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -978,13 +859,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ jmp(&miss);
} else {
// Check that the object is a two-byte string or a symbol.
- __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+ __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
__ b(hs, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
- r2);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+ r0);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
r1, name, &miss);
}
break;
@@ -998,14 +879,14 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the object is a smi or a heap number.
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &fast);
- __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
+ __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
__ b(ne, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::NUMBER_FUNCTION_INDEX,
- r2);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+ r0);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
r1, name, &miss);
}
break;
@@ -1028,22 +909,22 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::BOOLEAN_FUNCTION_INDEX,
- r2);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+ r0);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
r1, name, &miss);
}
break;
}
case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
- CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss);
+ CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
// Make sure object->HasFastElements().
// Get the elements array of the object.
__ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
- __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r2, ip);
+ __ cmp(r0, ip);
__ b(ne, &miss);
break;
@@ -1051,7 +932,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
UNREACHABLE();
}
- GenerateCallConstFunction(masm(), function, arguments());
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
// Handle call cache miss.
__ bind(&miss);
@@ -1067,14 +948,22 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
}
-Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
JSObject* holder,
String* name) {
// ----------- S t a t e -------------
- // -- lr: return address
+ // -- r2 : name
+ // -- lr : return address
// -----------------------------------
+ ASSERT(holder->HasNamedInterceptor());
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
Label miss;
+ const Register receiver = r0;
+ const Register holder_reg = r1;
+ const Register name_reg = r2;
+ const Register scratch = r3;
+
// Get the number of arguments.
const int argc = arguments().immediate();
@@ -1083,24 +972,79 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
// Get the receiver from the stack into r0.
__ ldr(r0, MemOperand(sp, argc * kPointerSize));
- // Load the name from the stack into r1.
- __ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
- CallInterceptorCompiler compiler(arguments(), r1);
- CompileLoadInterceptor(&compiler,
- this,
- masm(),
- JSObject::cast(object),
- holder,
- name,
- &lookup,
- r0,
- r2,
- r3,
- &miss);
+ // Check that the receiver isn't a smi.
+ __ BranchOnSmi(receiver, &miss);
+
+ // Check that the maps haven't changed.
+ Register reg = CheckPrototypes(object, receiver, holder, holder_reg,
+ scratch, name, &miss);
+ if (!reg.is(holder_reg)) {
+ __ mov(holder_reg, reg);
+ }
+
+ // If we call a constant function when the interceptor returns
+ // the no-result sentinel, generate code that optimizes this case.
+ if (lookup.IsProperty() &&
+ lookup.IsCacheable() &&
+ lookup.type() == CONSTANT_FUNCTION &&
+ lookup.GetConstantFunction()->is_compiled() &&
+ !holder->IsJSArray()) {
+ // Constant functions cannot sit on global object.
+ ASSERT(!lookup.holder()->IsGlobalObject());
+
+ // Call the interceptor.
+ __ EnterInternalFrame();
+ __ push(holder_reg);
+ __ push(name_reg);
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ holder);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ __ LeaveInternalFrame();
+ // r0 no longer contains the receiver.
+
+ // If the interceptor returns the no-result sentinel, call the constant function.
+ __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch);
+ Label invoke;
+ __ b(ne, &invoke);
+ // Check the prototypes between the interceptor's holder and the
+ // constant function's holder.
+ CheckPrototypes(holder, holder_reg,
+ lookup.holder(), r0,
+ scratch,
+ name,
+ &miss);
+
+ __ InvokeFunction(lookup.GetConstantFunction(),
+ arguments(),
+ JUMP_FUNCTION);
+
+ __ bind(&invoke);
+
+ } else {
+ // Call a runtime function to load the interceptor property.
+ __ EnterInternalFrame();
+ __ push(name_reg);
+
+ PushInterceptorArguments(masm(), receiver, holder_reg, name_reg, holder);
+ __ CallExternalReference(
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
+ 5);
+
+ __ pop(name_reg);
+ __ LeaveInternalFrame();
+ }
+
+ // Move the returned value, the function to call, into r1.
+ __ mov(r1, r0);
// Restore receiver.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+ __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
GenerateCallFunction(masm(), object, arguments(), &miss);
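
The rewritten interceptor stub above encodes a two-way branch: if the interceptor yields the no-result sentinel, the prototype-checked constant function is invoked directly; any other value is itself the callee. A minimal C++ sketch of that control flow (helper names here are illustrative, not the stub-compiler API):

    // Sketch only; the real stub emits ARM instructions for this logic.
    Object* CallThroughInterceptor(Object* receiver, JSObject* holder,
                                   String* name, Arguments* args) {
      Object* result = LoadPropertyWithInterceptor(receiver, holder, name);
      if (result == NoInterceptorResultSentinel()) {
        // Interceptor declined: call the constant function whose
        // prototype chain was validated up front.
        return InvokeConstantFunction(args);
      }
      // Otherwise the interceptor produced the callee itself.
      return CallFunction(result, args);
    }
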
@@ -1120,7 +1064,8 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
JSFunction* function,
String* name) {
// ----------- S t a t e -------------
- // -- lr: return address
+ // -- r2 : name
+ // -- lr : return address
// -----------------------------------
Label miss;
@@ -1139,7 +1084,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
}
// Check that the maps haven't changed.
- CheckPrototypes(object, r0, holder, r3, r2, name, &miss);
+ CheckPrototypes(object, r0, holder, r3, r1, name, &miss);
// Get the value from the cell.
__ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
@@ -1159,8 +1104,8 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Check the shared function info. Make sure it hasn't changed.
__ mov(r3, Operand(Handle<SharedFunctionInfo>(function->shared())));
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ cmp(r2, r3);
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ cmp(r4, r3);
__ b(ne, &miss);
} else {
__ cmp(r1, Operand(Handle<JSFunction>(function)));
@@ -1178,7 +1123,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
- __ IncrementCounter(&Counters::call_global_inline, 1, r2, r3);
+ __ IncrementCounter(&Counters::call_global_inline, 1, r1, r3);
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -1202,25 +1147,19 @@ Object* StoreStubCompiler::CompileStoreField(JSObject* object,
String* name) {
// ----------- S t a t e -------------
// -- r0 : value
+ // -- r1 : receiver
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
// -----------------------------------
Label miss;
- // Get the receiver from the stack.
- __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
-
- // name register might be clobbered.
GenerateStoreField(masm(),
- Builtins::StoreIC_ExtendStorage,
object,
index,
transition,
- r3, r2, r1,
+ r1, r2, r3,
&miss);
__ bind(&miss);
- __ mov(r2, Operand(Handle<String>(name))); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1234,39 +1173,33 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
String* name) {
// ----------- S t a t e -------------
// -- r0 : value
+ // -- r1 : receiver
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
// -----------------------------------
Label miss;
- // Get the object from the stack.
- __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
-
// Check that the object isn't a smi.
- __ tst(r3, Operand(kSmiTagMask));
+ __ tst(r1, Operand(kSmiTagMask));
__ b(eq, &miss);
// Check that the map of the object hasn't changed.
- __ ldr(r1, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ cmp(r1, Operand(Handle<Map>(object->map())));
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ cmp(r3, Operand(Handle<Map>(object->map())));
__ b(ne, &miss);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(r3, r1, &miss);
+ __ CheckAccessGlobalProxy(r1, r3, &miss);
}
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
- __ ldr(ip, MemOperand(sp)); // receiver
- __ push(ip);
+ __ push(r1); // receiver
__ mov(ip, Operand(Handle<AccessorInfo>(callback))); // callback info
- __ push(ip);
- __ push(r2); // name
- __ push(r0); // value
+ __ stm(db_w, sp, ip.bit() | r2.bit() | r0.bit());
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
@@ -1287,37 +1220,33 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
String* name) {
// ----------- S t a t e -------------
// -- r0 : value
+ // -- r1 : receiver
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
// -----------------------------------
Label miss;
- // Get the object from the stack.
- __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
-
// Check that the object isn't a smi.
- __ tst(r3, Operand(kSmiTagMask));
+ __ tst(r1, Operand(kSmiTagMask));
__ b(eq, &miss);
// Check that the map of the object hasn't changed.
- __ ldr(r1, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ cmp(r1, Operand(Handle<Map>(receiver->map())));
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ cmp(r3, Operand(Handle<Map>(receiver->map())));
__ b(ne, &miss);
// Perform global security token check if needed.
if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(r3, r1, &miss);
+ __ CheckAccessGlobalProxy(r1, r3, &miss);
}
- // Stub never generated for non-global objects that require access
+ // Stub is never generated for non-global objects that require access
// checks.
ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
- __ ldr(ip, MemOperand(sp)); // receiver
- __ push(ip);
- __ push(r2); // name
- __ push(r0); // value
+ __ push(r1); // receiver.
+ __ push(r2); // name.
+ __ push(r0); // value.
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
@@ -1339,14 +1268,13 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
String* name) {
// ----------- S t a t e -------------
// -- r0 : value
+ // -- r1 : receiver
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
// -----------------------------------
Label miss;
// Check that the map of the global has not changed.
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ cmp(r3, Operand(Handle<Map>(object->map())));
__ b(ne, &miss);
@@ -1355,12 +1283,12 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
__ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
__ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
- __ IncrementCounter(&Counters::named_store_global_inline, 1, r1, r3);
+ __ IncrementCounter(&Counters::named_store_global_inline, 1, r4, r3);
__ Ret();
// Handle store cache miss.
__ bind(&miss);
- __ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r1, r3);
+ __ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r4, r3);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1672,7 +1600,7 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
__ cmp(r2, Operand(Handle<String>(name)));
__ b(ne, &miss);
- GenerateLoadStringLength2(masm(), r0, r1, r3, &miss);
+ GenerateLoadStringLength(masm(), r0, r1, r3, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1, r1, r3);
@@ -1717,7 +1645,6 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
__ ldr(r3, MemOperand(sp));
// r1 is used as scratch register, r3 and r2 might be clobbered.
GenerateStoreField(masm(),
- Builtins::StoreIC_ExtendStorage,
object,
index,
transition,
diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc
index 7a8ac7266..0f7c59712 100644
--- a/deps/v8/src/arm/virtual-frame-arm.cc
+++ b/deps/v8/src/arm/virtual-frame-arm.cc
@@ -47,7 +47,7 @@ VirtualFrame::VirtualFrame()
: elements_(parameter_count() + local_count() + kPreallocatedElements),
stack_pointer_(parameter_count()) { // 0-based index of TOS.
for (int i = 0; i <= stack_pointer_; i++) {
- elements_.Add(FrameElement::MemoryElement());
+ elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
}
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
register_locations_[i] = kIllegalIndex;
@@ -233,6 +233,14 @@ void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
}
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void VirtualFrame::DebugBreak() {
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ DebugBreak();
+}
+#endif
+
+
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
int arg_count) {
@@ -305,7 +313,7 @@ void VirtualFrame::EmitPop(Register reg) {
void VirtualFrame::EmitPush(Register reg) {
ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement());
+ elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
stack_pointer_++;
__ push(reg);
}
diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h
index 9a2f7d360..a45cfc6e1 100644
--- a/deps/v8/src/arm/virtual-frame-arm.h
+++ b/deps/v8/src/arm/virtual-frame-arm.h
@@ -68,7 +68,8 @@ class VirtualFrame : public ZoneObject {
MacroAssembler* masm() { return cgen()->masm(); }
// Create a duplicate of an existing valid frame element.
- FrameElement CopyElementAt(int index);
+ FrameElement CopyElementAt(int index,
+ NumberInfo::Type info = NumberInfo::kUnknown);
// The number of elements on the virtual frame.
int element_count() { return elements_.length(); }
@@ -297,6 +298,10 @@ class VirtualFrame : public ZoneObject {
void CallRuntime(Runtime::Function* f, int arg_count);
void CallRuntime(Runtime::FunctionId id, int arg_count);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ void DebugBreak();
+#endif
+
// Invoke builtin given the number of arguments it expects on (and
// removes from) the stack.
void InvokeBuiltin(Builtins::JavaScript id,
@@ -339,7 +344,7 @@ class VirtualFrame : public ZoneObject {
void EmitPushMultiple(int count, int src_regs);
// Push an element on the virtual frame.
- void Push(Register reg);
+ void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
void Push(Handle<Object> value);
void Push(Smi* value) { Push(Handle<Object>(value)); }
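
The virtual-frame API now threads a NumberInfo::Type through pushed elements so later code generation can exploit statically known number types; the default kUnknown preserves the old behavior. A hedged usage sketch (the register names and the kSmi tag are assumptions for illustration):

    frame_->Push(r0);                    // no type knowledge: kUnknown
    frame_->Push(r1, NumberInfo::kSmi);  // lets the backend skip a smi check
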
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index c3ab179da..c28a66298 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -566,10 +566,11 @@ function ArraySlice(start, end) {
function ArraySplice(start, delete_count) {
var num_arguments = %_ArgumentsLength();
- // SpiderMonkey and KJS return undefined in the case where no
+ // SpiderMonkey and JSC return undefined in the case where no
// arguments are given instead of using the implicit undefined
// arguments. This does not follow ECMA-262, but we do the same for
// compatibility.
+ // TraceMonkey follows ECMA-262 though.
if (num_arguments == 0) return;
var len = TO_UINT32(this.length);
@@ -582,7 +583,7 @@ function ArraySplice(start, delete_count) {
if (start_i > len) start_i = len;
}
- // SpiderMonkey and KJS treat the case where no delete count is
+ // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
// given differently from when an undefined delete count is given.
// This does not follow ECMA-262, but we do the same for
// compatibility.
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index dbf2742b2..96d516f18 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -430,6 +430,11 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "code target (js construct call)";
case RelocInfo::CODE_TARGET_CONTEXT:
return "code target (context)";
+ case RelocInfo::DEBUG_BREAK:
+#ifndef ENABLE_DEBUGGER_SUPPORT
+ UNREACHABLE();
+#endif
+ return "debug break";
case RelocInfo::CODE_TARGET:
return "code target";
case RelocInfo::RUNTIME_ENTRY:
@@ -485,6 +490,11 @@ void RelocInfo::Verify() {
case EMBEDDED_OBJECT:
Object::VerifyPointer(target_object());
break;
+ case DEBUG_BREAK:
+#ifndef ENABLE_DEBUGGER_SUPPORT
+ UNREACHABLE();
+ break;
+#endif
case CONSTRUCT_CALL:
case CODE_TARGET_CONTEXT:
case CODE_TARGET: {
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index ec47d5712..f4013061e 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -119,6 +119,7 @@ class RelocInfo BASE_EMBEDDED {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
CODE_TARGET_CONTEXT, // code target used for contextual loads.
+ DEBUG_BREAK,
CODE_TARGET, // code target which is not any of the above.
EMBEDDED_OBJECT,
EMBEDDED_STRING,
@@ -506,8 +507,10 @@ static inline bool is_intn(int x, int n) {
return -(1 << (n-1)) <= x && x < (1 << (n-1));
}
-static inline bool is_int24(int x) { return is_intn(x, 24); }
static inline bool is_int8(int x) { return is_intn(x, 8); }
+static inline bool is_int16(int x) { return is_intn(x, 16); }
+static inline bool is_int18(int x) { return is_intn(x, 18); }
+static inline bool is_int24(int x) { return is_intn(x, 24); }
static inline bool is_uintn(int x, int n) {
return (x & -(1 << n)) == 0;
@@ -519,9 +522,20 @@ static inline bool is_uint4(int x) { return is_uintn(x, 4); }
static inline bool is_uint5(int x) { return is_uintn(x, 5); }
static inline bool is_uint6(int x) { return is_uintn(x, 6); }
static inline bool is_uint8(int x) { return is_uintn(x, 8); }
+static inline bool is_uint10(int x) { return is_uintn(x, 10); }
static inline bool is_uint12(int x) { return is_uintn(x, 12); }
static inline bool is_uint16(int x) { return is_uintn(x, 16); }
static inline bool is_uint24(int x) { return is_uintn(x, 24); }
+static inline bool is_uint26(int x) { return is_uintn(x, 26); }
+static inline bool is_uint28(int x) { return is_uintn(x, 28); }
+
+static inline int NumberOfBitsSet(uint32_t x) {
+ unsigned int num_bits_set;
+ for (num_bits_set = 0; x; x >>= 1) {
+ num_bits_set += x & 1;
+ }
+ return num_bits_set;
+}
} } // namespace v8::internal
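
The new predicates follow the existing pattern: is_intn tests a signed two's-complement n-bit range, is_uintn an unsigned one, and NumberOfBitsSet is a plain popcount loop. A quick sanity sketch of the ranges, assuming the definitions above are in scope:

    #include <cassert>

    void BitPredicateExamples() {
      // Signed 16-bit range is [-32768, 32768).
      assert(is_int16(-32768) && is_int16(32767) && !is_int16(32768));
      // Unsigned 10-bit range is [0, 1024).
      assert(is_uint10(1023) && !is_uint10(1024));
      // 0xF0F0 has eight set bits; the loop adds one per bit.
      assert(NumberOfBitsSet(0xF0F0) == 8);
    }
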
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 48d0bfac0..8e717a6c8 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -102,6 +102,7 @@ namespace internal {
// Forward declarations
class TargetCollector;
class MaterializedLiteral;
+class DefinitionInfo;
#define DEF_FORWARD_DECLARATION(type) class type;
AST_NODE_LIST(DEF_FORWARD_DECLARATION)
@@ -182,7 +183,7 @@ class Expression: public AstNode {
static const int kNoLabel = -1;
- Expression() : num_(kNoLabel) {}
+ Expression() : num_(kNoLabel), def_(NULL), defined_vars_(NULL) {}
virtual Expression* AsExpression() { return this; }
@@ -193,6 +194,11 @@ class Expression: public AstNode {
// names because [] for string objects is handled only by keyed ICs.
virtual bool IsPropertyName() { return false; }
+ // True if the expression does not have (evaluated) subexpressions.
+ // Function literals are leaves because their subexpressions are not
+ // evaluated.
+ virtual bool IsLeaf() { return false; }
+
// Mark the expression as being compiled as an expression
// statement. This is used to transform postfix increments to
// (faster) prefix increments.
@@ -206,9 +212,20 @@ class Expression: public AstNode {
// AST node numbering ordered by evaluation order.
void set_num(int n) { num_ = n; }
+ // Data flow information.
+ DefinitionInfo* var_def() { return def_; }
+ void set_var_def(DefinitionInfo* def) { def_ = def; }
+
+ ZoneList<DefinitionInfo*>* defined_vars() { return defined_vars_; }
+ void set_defined_vars(ZoneList<DefinitionInfo*>* defined_vars) {
+ defined_vars_ = defined_vars;
+ }
+
private:
StaticType type_;
int num_;
+ DefinitionInfo* def_;
+ ZoneList<DefinitionInfo*>* defined_vars_;
};
@@ -720,6 +737,8 @@ class Literal: public Expression {
return false;
}
+ virtual bool IsLeaf() { return true; }
+
// Identity testers.
bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
@@ -802,6 +821,8 @@ class ObjectLiteral: public MaterializedLiteral {
virtual ObjectLiteral* AsObjectLiteral() { return this; }
virtual void Accept(AstVisitor* v);
+ virtual bool IsLeaf() { return properties()->is_empty(); }
+
Handle<FixedArray> constant_properties() const {
return constant_properties_;
}
@@ -825,6 +846,8 @@ class RegExpLiteral: public MaterializedLiteral {
virtual void Accept(AstVisitor* v);
+ virtual bool IsLeaf() { return true; }
+
Handle<String> pattern() const { return pattern_; }
Handle<String> flags() const { return flags_; }
@@ -849,6 +872,8 @@ class ArrayLiteral: public MaterializedLiteral {
virtual void Accept(AstVisitor* v);
virtual ArrayLiteral* AsArrayLiteral() { return this; }
+ virtual bool IsLeaf() { return values()->is_empty(); }
+
Handle<FixedArray> constant_elements() const { return constant_elements_; }
ZoneList<Expression*>* values() const { return values_; }
@@ -896,6 +921,11 @@ class VariableProxy: public Expression {
return var_ == NULL ? true : var_->IsValidLeftHandSide();
}
+ virtual bool IsLeaf() {
+ ASSERT(var_ != NULL); // Variable must be resolved.
+ return var()->is_global() || var()->rewrite()->IsLeaf();
+ }
+
bool IsVariable(Handle<String> n) {
return !is_this() && name().is_identical_to(n);
}
@@ -981,6 +1011,8 @@ class Slot: public Expression {
// Type testing & conversion
virtual Slot* AsSlot() { return this; }
+ virtual bool IsLeaf() { return true; }
+
// Accessors
Variable* var() const { return var_; }
Type type() const { return type_; }
@@ -1337,6 +1369,8 @@ class FunctionLiteral: public Expression {
// Type testing & conversion
virtual FunctionLiteral* AsFunctionLiteral() { return this; }
+ virtual bool IsLeaf() { return true; }
+
Handle<String> name() const { return name_; }
Scope* scope() const { return scope_; }
ZoneList<Statement*>* body() const { return body_; }
@@ -1403,6 +1437,8 @@ class FunctionBoilerplateLiteral: public Expression {
Handle<JSFunction> boilerplate() const { return boilerplate_; }
+ virtual bool IsLeaf() { return true; }
+
virtual void Accept(AstVisitor* v);
private:
@@ -1413,6 +1449,7 @@ class FunctionBoilerplateLiteral: public Expression {
class ThisFunction: public Expression {
public:
virtual void Accept(AstVisitor* v);
+ virtual bool IsLeaf() { return true; }
};
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 78d09952a..a7cf421b5 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -192,116 +192,6 @@ void Bootstrapper::TearDown() {
}
-// Pending fixups are code positions that refer to builtin code
-// objects that were not available at the time the code was generated.
-// The pending list is processed whenever an environment has been
-// created.
-class PendingFixups : public AllStatic {
- public:
- static void Add(Code* code, MacroAssembler* masm);
- static bool Process(Handle<JSBuiltinsObject> builtins);
-
- static void Iterate(ObjectVisitor* v);
-
- private:
- static List<Object*> code_;
- static List<const char*> name_;
- static List<int> pc_;
- static List<uint32_t> flags_;
-
- static void Clear();
-};
-
-
-List<Object*> PendingFixups::code_(0);
-List<const char*> PendingFixups::name_(0);
-List<int> PendingFixups::pc_(0);
-List<uint32_t> PendingFixups::flags_(0);
-
-
-void PendingFixups::Add(Code* code, MacroAssembler* masm) {
- // Note this code is not only called during bootstrapping.
- List<MacroAssembler::Unresolved>* unresolved = masm->unresolved();
- int n = unresolved->length();
- for (int i = 0; i < n; i++) {
- const char* name = unresolved->at(i).name;
- code_.Add(code);
- name_.Add(name);
- pc_.Add(unresolved->at(i).pc);
- flags_.Add(unresolved->at(i).flags);
- LOG(StringEvent("unresolved", name));
- }
-}
-
-
-bool PendingFixups::Process(Handle<JSBuiltinsObject> builtins) {
- HandleScope scope;
- // NOTE: Extra fixups may be added to the list during the iteration
- // due to lazy compilation of functions during the processing. Do not
- // cache the result of getting the length of the code list.
- for (int i = 0; i < code_.length(); i++) {
- const char* name = name_[i];
- uint32_t flags = flags_[i];
- Handle<String> symbol = Factory::LookupAsciiSymbol(name);
- Object* o = builtins->GetProperty(*symbol);
-#ifdef DEBUG
- if (!o->IsJSFunction()) {
- V8_Fatal(__FILE__, __LINE__, "Cannot resolve call to builtin %s", name);
- }
-#endif
- Handle<SharedFunctionInfo> shared(JSFunction::cast(o)->shared());
- // Make sure the number of parameters match the formal parameter count.
- int argc = Bootstrapper::FixupFlagsArgumentsCount::decode(flags);
- USE(argc);
- ASSERT(shared->formal_parameter_count() == argc);
- // Do lazy compilation if necessary and check for stack overflows.
- if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) {
- Clear();
- return false;
- }
- Code* code = Code::cast(code_[i]);
- Address pc = code->instruction_start() + pc_[i];
- RelocInfo target(pc, RelocInfo::CODE_TARGET, 0);
- bool use_code_object = Bootstrapper::FixupFlagsUseCodeObject::decode(flags);
- if (use_code_object) {
- target.set_target_object(shared->code());
- } else {
- target.set_target_address(shared->code()->instruction_start());
- }
- LOG(StringEvent("resolved", name));
- }
- Clear();
-
- // TODO(1240818): We should probably try to avoid doing this for all
- // the V8 builtin JS files. It should only happen after running
- // runtime.js - just like there shouldn't be any fixups left after
- // that.
- for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
- Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
- Handle<String> name = Factory::LookupAsciiSymbol(Builtins::GetName(id));
- JSFunction* function = JSFunction::cast(builtins->GetProperty(*name));
- builtins->set_javascript_builtin(id, function);
- }
-
- return true;
-}
-
-
-void PendingFixups::Clear() {
- code_.Clear();
- name_.Clear();
- pc_.Clear();
- flags_.Clear();
-}
-
-
-void PendingFixups::Iterate(ObjectVisitor* v) {
- if (!code_.is_empty()) {
- v->VisitPointers(&code_[0], &code_[0] + code_.length());
- }
-}
-
-
class Genesis BASE_EMBEDDED {
public:
Genesis(Handle<Object> global_object,
@@ -338,6 +228,7 @@ class Genesis BASE_EMBEDDED {
bool InstallExtension(const char* name);
bool InstallExtension(v8::RegisteredExtension* current);
bool InstallSpecialObjects();
+ bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
bool ConfigureApiObject(Handle<JSObject> object,
Handle<ObjectTemplateInfo> object_template);
bool ConfigureGlobalObjects(v8::Handle<v8::ObjectTemplate> global_template);
@@ -379,15 +270,6 @@ void Bootstrapper::Iterate(ObjectVisitor* v) {
v->Synchronize("NativesCache");
extensions_cache.Iterate(v);
v->Synchronize("Extensions");
- PendingFixups::Iterate(v);
- v->Synchronize("PendingFixups");
-}
-
-
-// While setting up the environment, we collect code positions that
-// need to be patched before we can run any code in the environment.
-void Bootstrapper::AddFixup(Code* code, MacroAssembler* masm) {
- PendingFixups::Add(code, masm);
}
@@ -841,11 +723,11 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
#ifdef DEBUG
LookupResult lookup;
result->LocalLookup(Heap::callee_symbol(), &lookup);
- ASSERT(lookup.IsValid() && (lookup.type() == FIELD));
+ ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
ASSERT(lookup.GetFieldIndex() == Heap::arguments_callee_index);
result->LocalLookup(Heap::length_symbol(), &lookup);
- ASSERT(lookup.IsValid() && (lookup.type() == FIELD));
+ ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
ASSERT(lookup.GetFieldIndex() == Heap::arguments_length_index);
ASSERT(result->map()->inobject_properties() > Heap::arguments_callee_index);
@@ -942,7 +824,8 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
ASSERT(source->IsAsciiRepresentation());
Handle<String> script_name = Factory::NewStringFromUtf8(name);
boilerplate =
- Compiler::Compile(source, script_name, 0, 0, extension, NULL);
+ Compiler::Compile(source, script_name, 0, 0, extension, NULL,
+ Handle<String>::null());
if (boilerplate.is_null()) return false;
cache->Add(name, boilerplate);
}
@@ -968,8 +851,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
Handle<Object> result =
Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
if (has_pending_exception) return false;
- return PendingFixups::Process(
- Handle<JSBuiltinsObject>(Top::context()->builtins()));
+ return true;
}
@@ -989,7 +871,6 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(JSFunction, "ToInteger", to_integer_fun);
INSTALL_NATIVE(JSFunction, "ToUint32", to_uint32_fun);
INSTALL_NATIVE(JSFunction, "ToInt32", to_int32_fun);
- INSTALL_NATIVE(JSFunction, "ToBoolean", to_boolean_fun);
INSTALL_NATIVE(JSFunction, "GlobalEval", global_eval_fun);
INSTALL_NATIVE(JSFunction, "Instantiate", instantiate_fun);
INSTALL_NATIVE(JSFunction, "ConfigureTemplateInstance",
@@ -1176,6 +1057,10 @@ bool Genesis::InstallNatives() {
i < Natives::GetBuiltinsCount();
i++) {
if (!CompileBuiltin(i)) return false;
+ // TODO(ager): We really only need to install the JS builtin
+ // functions on the builtins object after compiling and running
+ // runtime.js.
+ if (!InstallJSBuiltins(builtins)) return false;
}
// Setup natives with lazy loading.
@@ -1377,6 +1262,22 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
}
+bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
+ HandleScope scope;
+ for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
+ Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
+ Handle<String> name = Factory::LookupAsciiSymbol(Builtins::GetName(id));
+ Handle<JSFunction> function
+ = Handle<JSFunction>(JSFunction::cast(builtins->GetProperty(*name)));
+ builtins->set_javascript_builtin(id, *function);
+ Handle<SharedFunctionInfo> shared
+ = Handle<SharedFunctionInfo>(function->shared());
+ if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
+ }
+ return true;
+}
+
+
bool Genesis::ConfigureGlobalObjects(
v8::Handle<v8::ObjectTemplate> global_proxy_template) {
Handle<JSObject> global_proxy(
@@ -1451,7 +1352,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
LookupResult result;
to->LocalLookup(descs->GetKey(i), &result);
// If the property is already there we skip it
- if (result.IsValid()) continue;
+ if (result.IsProperty()) continue;
HandleScope inner;
Handle<DescriptorArray> inst_descs =
Handle<DescriptorArray>(to->map()->instance_descriptors());
@@ -1488,7 +1389,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
// If the property is already there we skip it.
LookupResult result;
to->LocalLookup(String::cast(raw_key), &result);
- if (result.IsValid()) continue;
+ if (result.IsProperty()) continue;
// Set the property.
Handle<String> key = Handle<String>(String::cast(raw_key));
Handle<Object> value = Handle<Object>(properties->ValueAt(i));
@@ -1572,25 +1473,33 @@ void Genesis::AddSpecialFunction(Handle<JSObject> prototype,
void Genesis::BuildSpecialFunctionTable() {
HandleScope scope;
Handle<JSObject> global = Handle<JSObject>(global_context()->global());
- // Add special versions for Array.prototype.pop and push.
+ // Add special versions for some Array.prototype functions.
Handle<JSFunction> function =
Handle<JSFunction>(
JSFunction::cast(global->GetProperty(Heap::Array_symbol())));
Handle<JSObject> visible_prototype =
Handle<JSObject>(JSObject::cast(function->prototype()));
- // Remember to put push and pop on the hidden prototype if it's there.
- Handle<JSObject> push_and_pop_prototype;
+ // Remember to put those specializations on the hidden prototype if present.
+ Handle<JSObject> special_prototype;
Handle<Object> superproto(visible_prototype->GetPrototype());
if (superproto->IsJSObject() &&
JSObject::cast(*superproto)->map()->is_hidden_prototype()) {
- push_and_pop_prototype = Handle<JSObject>::cast(superproto);
+ special_prototype = Handle<JSObject>::cast(superproto);
} else {
- push_and_pop_prototype = visible_prototype;
+ special_prototype = visible_prototype;
}
- AddSpecialFunction(push_and_pop_prototype, "pop",
+ AddSpecialFunction(special_prototype, "pop",
Handle<Code>(Builtins::builtin(Builtins::ArrayPop)));
- AddSpecialFunction(push_and_pop_prototype, "push",
+ AddSpecialFunction(special_prototype, "push",
Handle<Code>(Builtins::builtin(Builtins::ArrayPush)));
+ AddSpecialFunction(special_prototype, "shift",
+ Handle<Code>(Builtins::builtin(Builtins::ArrayShift)));
+ AddSpecialFunction(special_prototype, "unshift",
+ Handle<Code>(Builtins::builtin(Builtins::ArrayUnshift)));
+ AddSpecialFunction(special_prototype, "slice",
+ Handle<Code>(Builtins::builtin(Builtins::ArraySlice)));
+ AddSpecialFunction(special_prototype, "splice",
+ Handle<Code>(Builtins::builtin(Builtins::ArraySplice)));
}
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 7cd3a2bbf..cc775b284 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -59,9 +59,6 @@ class Bootstrapper : public AllStatic {
Handle<JSFunction>* handle);
static void NativesCacheAdd(Vector<const char> name, Handle<JSFunction> fun);
- // Append code that needs fixup at the end of boot strapping.
- static void AddFixup(Code* code, MacroAssembler* masm);
-
// Tells whether bootstrapping is active.
static bool IsActive();
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index db0770f3a..8e88c2869 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -168,28 +168,6 @@ static inline bool CalledAsConstructor() {
// ----------------------------------------------------------------------------
-Handle<Code> Builtins::GetCode(JavaScript id, bool* resolved) {
- Code* code = Builtins::builtin(Builtins::Illegal);
- *resolved = false;
-
- if (Top::context() != NULL) {
- Object* object = Top::builtins()->javascript_builtin(id);
- if (object->IsJSFunction()) {
- Handle<SharedFunctionInfo> shared(JSFunction::cast(object)->shared());
- // Make sure the number of parameters match the formal parameter count.
- ASSERT(shared->formal_parameter_count() ==
- Builtins::GetArgumentsCount(id));
- if (EnsureCompiled(shared, CLEAR_EXCEPTION)) {
- code = shared->code();
- *resolved = true;
- }
- }
- }
-
- return Handle<Code>(code);
-}
-
-
BUILTIN(Illegal) {
UNREACHABLE();
return Heap::undefined_value(); // Make compiler happy.
@@ -268,19 +246,19 @@ BUILTIN(ArrayPush) {
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
- // Make sure we have space for the elements.
int len = Smi::cast(array->length())->value();
+ int to_add = args.length() - 1;
+ if (to_add == 0) {
+ return Smi::FromInt(len);
+ }
+ // Currently fixed arrays cannot grow too big, so
+ // we should never hit this case.
+ ASSERT(to_add <= (Smi::kMaxValue - len));
- // Set new length.
- int new_length = len + args.length() - 1;
+ int new_length = len + to_add;
FixedArray* elms = FixedArray::cast(array->elements());
- if (new_length <= elms->length()) {
- // Backing storage has extra space for the provided values.
- for (int index = 0; index < args.length() - 1; index++) {
- elms->set(index + len, args[index+1]);
- }
- } else {
+ if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
@@ -291,16 +269,21 @@ BUILTIN(ArrayPush) {
WriteBarrierMode mode = new_elms->GetWriteBarrierMode(no_gc);
// Fill out the new array with old elements.
for (int i = 0; i < len; i++) new_elms->set(i, elms->get(i), mode);
- // Add the provided values.
- for (int index = 0; index < args.length() - 1; index++) {
- new_elms->set(index + len, args[index+1], mode);
- }
- // Set the new backing storage.
- array->set_elements(new_elms);
+ elms = new_elms;
+ array->set_elements(elms);
+ }
+
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+
+ // Add the provided values.
+ for (int index = 0; index < to_add; index++) {
+ elms->set(index + len, args[index + 1], mode);
}
+
// Set the length.
array->set_length(Smi::FromInt(new_length));
- return array->length();
+ return Smi::FromInt(new_length);
}
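
ArrayPush now grows the backing store once, by roughly 1.5x plus a constant slack, and only then copies, so a sequence of pushes reallocates O(log n) times. The capacity schedule in isolation (plain C++, independent of V8):

    #include <cstdio>

    // Mirrors the formula above: capacity = n + n/2 + 16.
    int NewCapacity(int new_length) {
      return new_length + (new_length >> 1) + 16;
    }

    int main() {
      // Growing from length 1: 17, 43, 82, 140, ... geometric growth
      // keeps repeated pushes amortized O(1).
      for (int len = 1; len < 100; len = NewCapacity(len) + 1)
        std::printf("grow at %d -> capacity %d\n", len, NewCapacity(len));
      return 0;
    }
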
@@ -335,6 +318,355 @@ BUILTIN(ArrayPop) {
}
+static Object* GetElementToMove(uint32_t index,
+ FixedArray* elms,
+ JSObject* prototype) {
+ Object* e = elms->get(index);
+ if (e->IsTheHole() && prototype->HasElement(index)) {
+ e = prototype->GetElement(index);
+ }
+ return e;
+}
+
+
+BUILTIN(ArrayShift) {
+ JSArray* array = JSArray::cast(*args.receiver());
+ ASSERT(array->HasFastElements());
+
+ int len = Smi::cast(array->length())->value();
+ if (len == 0) return Heap::undefined_value();
+
+ // Fetch the prototype.
+ JSFunction* array_function =
+ Top::context()->global_context()->array_function();
+ JSObject* prototype = JSObject::cast(array_function->prototype());
+
+ FixedArray* elms = FixedArray::cast(array->elements());
+
+ // Get the first element.
+ Object* first = elms->get(0);
+ if (first->IsTheHole()) {
+ first = prototype->GetElement(0);
+ }
+
+ // Shift the elements.
+ for (int i = 0; i < len - 1; i++) {
+ elms->set(i, GetElementToMove(i + 1, elms, prototype));
+ }
+ elms->set(len - 1, Heap::the_hole_value());
+
+ // Set the length.
+ array->set_length(Smi::FromInt(len - 1));
+
+ return first;
+}
+
+
+BUILTIN(ArrayUnshift) {
+ JSArray* array = JSArray::cast(*args.receiver());
+ ASSERT(array->HasFastElements());
+
+ int len = Smi::cast(array->length())->value();
+ int to_add = args.length() - 1;
+ // Note that we cannot quit early if to_add == 0: existing
+ // values may still need to be lifted from the prototype into
+ // the array.
+
+ int new_length = len + to_add;
+ // Currently fixed arrays cannot grow too big, so
+ // we should never hit this case.
+ ASSERT(to_add <= (Smi::kMaxValue - len));
+
+ FixedArray* elms = FixedArray::cast(array->elements());
+
+ // Fetch the prototype.
+ JSFunction* array_function =
+ Top::context()->global_context()->array_function();
+ JSObject* prototype = JSObject::cast(array_function->prototype());
+
+ if (new_length > elms->length()) {
+ // New backing storage is needed.
+ int capacity = new_length + (new_length >> 1) + 16;
+ Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
+ if (obj->IsFailure()) return obj;
+
+ AssertNoAllocation no_gc;
+ FixedArray* new_elms = FixedArray::cast(obj);
+ WriteBarrierMode mode = new_elms->GetWriteBarrierMode(no_gc);
+ // Fill out the new array with old elements.
+ for (int i = 0; i < len; i++)
+ new_elms->set(to_add + i,
+ GetElementToMove(i, elms, prototype),
+ mode);
+
+ elms = new_elms;
+ array->set_elements(elms);
+ } else {
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+
+ // Move the elements to the right.
+ for (int i = 0; i < len; i++) {
+ elms->set(new_length - i - 1,
+ GetElementToMove(len - i - 1, elms, prototype),
+ mode);
+ }
+ }
+
+ // Add the provided values.
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < to_add; i++) {
+ elms->set(i, args[i + 1], mode);
+ }
+
+ // Set the length.
+ array->set_length(Smi::FromInt(new_length));
+ return Smi::FromInt(new_length);
+}
+
+
+static Object* CallJsBuiltin(const char* name,
+ BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
+ HandleScope handleScope;
+
+ Handle<Object> js_builtin =
+ GetProperty(Handle<JSObject>(Top::global_context()->builtins()),
+ name);
+ ASSERT(js_builtin->IsJSFunction());
+ Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
+ Vector<Object**> argv(Vector<Object**>::New(args.length() - 1));
+ int n_args = args.length() - 1;
+ for (int i = 0; i < n_args; i++) {
+ argv[i] = &args[i + 1];
+ }
+ bool pending_exception = false;
+ Handle<Object> result = Execution::Call(function,
+ args.receiver(),
+ n_args,
+ argv.start(),
+ &pending_exception);
+ if (pending_exception) return Failure::Exception();
+ return *result;
+}
+
+
+BUILTIN(ArraySlice) {
+ JSArray* array = JSArray::cast(*args.receiver());
+ ASSERT(array->HasFastElements());
+
+ int len = Smi::cast(array->length())->value();
+
+ int n_arguments = args.length() - 1;
+
+ // Note the carefully chosen defaults: if an argument is missing,
+ // it is undefined, which converts to 0 for relativeStart
+ // and to len for relativeEnd.
+ int relativeStart = 0;
+ int relativeEnd = len;
+ if (n_arguments > 0) {
+ Object* arg1 = args[1];
+ if (arg1->IsSmi()) {
+ relativeStart = Smi::cast(arg1)->value();
+ } else if (!arg1->IsUndefined()) {
+ return CallJsBuiltin("ArraySlice", args);
+ }
+ if (n_arguments > 1) {
+ Object* arg2 = args[2];
+ if (arg2->IsSmi()) {
+ relativeEnd = Smi::cast(arg2)->value();
+ } else if (!arg2->IsUndefined()) {
+ return CallJsBuiltin("ArraySlice", args);
+ }
+ }
+ }
+
+ // ECMA-262, 3rd Edition, Section 15.4.4.10, step 6.
+ int k = (relativeStart < 0) ? Max(len + relativeStart, 0)
+ : Min(relativeStart, len);
+
+ // ECMA-262, 3rd Edition, Section 15.4.4.10, step 8.
+ int final = (relativeEnd < 0) ? Max(len + relativeEnd, 0)
+ : Min(relativeEnd, len);
+
+ // Calculate the length of result array.
+ int result_len = final - k;
+ if (result_len < 0) {
+ result_len = 0;
+ }
+
+ JSFunction* array_function =
+ Top::context()->global_context()->array_function();
+ Object* result = Heap::AllocateJSObject(array_function);
+ if (result->IsFailure()) return result;
+ JSArray* result_array = JSArray::cast(result);
+
+ result = Heap::AllocateFixedArrayWithHoles(result_len);
+ if (result->IsFailure()) return result;
+ FixedArray* result_elms = FixedArray::cast(result);
+
+ FixedArray* elms = FixedArray::cast(array->elements());
+
+ // Fetch the prototype.
+ JSObject* prototype = JSObject::cast(array_function->prototype());
+
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = result_elms->GetWriteBarrierMode(no_gc);
+
+ // Fill newly created array.
+ for (int i = 0; i < result_len; i++) {
+ result_elms->set(i,
+ GetElementToMove(k + i, elms, prototype),
+ mode);
+ }
+
+ // Set elements.
+ result_array->set_elements(result_elms);
+
+ // Set the length.
+ result_array->set_length(Smi::FromInt(result_len));
+ return result_array;
+}
+
+
+BUILTIN(ArraySplice) {
+ JSArray* array = JSArray::cast(*args.receiver());
+ ASSERT(array->HasFastElements());
+
+ int len = Smi::cast(array->length())->value();
+
+ int n_arguments = args.length() - 1;
+
+ // SpiderMonkey and JSC return undefined in the case where no
+ // arguments are given instead of using the implicit undefined
+ // arguments. This does not follow ECMA-262, but we do the same for
+ // compatibility.
+ // TraceMonkey follows ECMA-262 though.
+ if (n_arguments == 0) {
+ return Heap::undefined_value();
+ }
+
+ int relativeStart = 0;
+ Object* arg1 = args[1];
+ if (arg1->IsSmi()) {
+ relativeStart = Smi::cast(arg1)->value();
+ } else if (!arg1->IsUndefined()) {
+ return CallJsBuiltin("ArraySplice", args);
+ }
+ int actualStart = (relativeStart < 0) ? Max(len + relativeStart, 0)
+ : Min(relativeStart, len);
+
+ // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
+ // given differently from when an undefined delete count is given.
+ // This does not follow ECMA-262, but we do the same for
+ // compatibility.
+ int deleteCount = len;
+ if (n_arguments > 1) {
+ Object* arg2 = args[2];
+ if (arg2->IsSmi()) {
+ deleteCount = Smi::cast(arg2)->value();
+ } else {
+ return CallJsBuiltin("ArraySplice", args);
+ }
+ }
+ int actualDeleteCount = Min(Max(deleteCount, 0), len - actualStart);
+
+ JSFunction* array_function =
+ Top::context()->global_context()->array_function();
+
+ // Allocate result array.
+ Object* result = Heap::AllocateJSObject(array_function);
+ if (result->IsFailure()) return result;
+ JSArray* result_array = JSArray::cast(result);
+
+ result = Heap::AllocateFixedArrayWithHoles(actualDeleteCount);
+ if (result->IsFailure()) return result;
+ FixedArray* result_elms = FixedArray::cast(result);
+
+ FixedArray* elms = FixedArray::cast(array->elements());
+
+ // Fetch the prototype.
+ JSObject* prototype = JSObject::cast(array_function->prototype());
+
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = result_elms->GetWriteBarrierMode(no_gc);
+
+ // Fill newly created array.
+ for (int k = 0; k < actualDeleteCount; k++) {
+ result_elms->set(k,
+ GetElementToMove(actualStart + k, elms, prototype),
+ mode);
+ }
+
+ // Set elements.
+ result_array->set_elements(result_elms);
+
+ // Set the length.
+ result_array->set_length(Smi::FromInt(actualDeleteCount));
+
+ int itemCount = (n_arguments > 1) ? (n_arguments - 2) : 0;
+
+ int new_length = len - actualDeleteCount + itemCount;
+
+ mode = elms->GetWriteBarrierMode(no_gc);
+ if (itemCount < actualDeleteCount) {
+ // Shrink the array.
+ for (int k = actualStart; k < (len - actualDeleteCount); k++) {
+ elms->set(k + itemCount,
+ GetElementToMove(k + actualDeleteCount, elms, prototype),
+ mode);
+ }
+
+ for (int k = len; k > new_length; k--) {
+ elms->set(k - 1, Heap::the_hole_value());
+ }
+ } else if (itemCount > actualDeleteCount) {
+ // Currently fixed arrays cannot grow too big, so
+ // we should never hit this case.
+ ASSERT((itemCount - actualDeleteCount) <= (Smi::kMaxValue - len));
+
+ FixedArray* source_elms = elms;
+
+ // Check if the array needs to grow.
+ if (new_length > elms->length()) {
+ // New backing storage is needed.
+ int capacity = new_length + (new_length >> 1) + 16;
+ Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
+ if (obj->IsFailure()) return obj;
+
+ FixedArray* new_elms = FixedArray::cast(obj);
+ mode = new_elms->GetWriteBarrierMode(no_gc);
+
+ // Copy the part before actualStart as is.
+ for (int k = 0; k < actualStart; k++) {
+ new_elms->set(k, elms->get(k), mode);
+ }
+
+ source_elms = elms;
+ elms = new_elms;
+ array->set_elements(elms);
+ }
+
+ for (int k = len - actualDeleteCount; k > actualStart; k--) {
+ elms->set(k + itemCount - 1,
+ GetElementToMove(k + actualDeleteCount - 1,
+ source_elms,
+ prototype),
+ mode);
+ }
+ }
+
+ for (int k = actualStart; k < actualStart + itemCount; k++) {
+ elms->set(k, args[3 + k - actualStart], mode);
+ }
+
+ // Set the length.
+ array->set_length(Smi::FromInt(new_length));
+
+ return result_array;
+}
+
+
// -----------------------------------------------------------------------------
//
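
Both ArraySlice and ArraySplice above clamp their relative indices per ECMA-262 15.4.4.10 before touching elements; negative values count back from the end and everything lands in [0, len]. The clamping step in isolation, since the edge cases are easy to get wrong (a sketch, not V8 code):

    #include <algorithm>

    // ECMA-262, 3rd Edition, Section 15.4.4.10, steps 6 and 8.
    int ClampIndex(int relative, int len) {
      return relative < 0 ? std::max(len + relative, 0)
                          : std::min(relative, len);
    }

    // For len == 5: ClampIndex(-2, 5) == 3, so slice(-2) copies
    // indices 3..4; ClampIndex(7, 5) == 5, so slice(7) copies nothing.
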
@@ -474,6 +806,76 @@ BUILTIN(HandleApiCallConstruct) {
}
+#ifdef DEBUG
+
+static void VerifyTypeCheck(Handle<JSObject> object,
+ Handle<JSFunction> function) {
+ FunctionTemplateInfo* info =
+ FunctionTemplateInfo::cast(function->shared()->function_data());
+ if (info->signature()->IsUndefined()) return;
+ SignatureInfo* signature = SignatureInfo::cast(info->signature());
+ Object* receiver_type = signature->receiver();
+ if (receiver_type->IsUndefined()) return;
+ FunctionTemplateInfo* type = FunctionTemplateInfo::cast(receiver_type);
+ ASSERT(object->IsInstanceOf(type));
+}
+
+#endif
+
+
+BUILTIN(FastHandleApiCall) {
+ ASSERT(!CalledAsConstructor());
+ const bool is_construct = false;
+
+ // We expect four more arguments: function, callback, call data, and holder.
+ const int args_length = args.length() - 4;
+ ASSERT(args_length >= 0);
+
+ Handle<JSFunction> function = args.at<JSFunction>(args_length);
+ Object* callback_obj = args[args_length + 1];
+ Handle<Object> data_handle = args.at<Object>(args_length + 2);
+ Handle<JSObject> checked_holder = args.at<JSObject>(args_length + 3);
+
+#ifdef DEBUG
+ VerifyTypeCheck(checked_holder, function);
+#endif
+
+ v8::Local<v8::Object> holder = v8::Utils::ToLocal(checked_holder);
+ v8::Local<v8::Function> callee = v8::Utils::ToLocal(function);
+ v8::InvocationCallback callback =
+ v8::ToCData<v8::InvocationCallback>(callback_obj);
+ v8::Local<v8::Value> data = v8::Utils::ToLocal(data_handle);
+
+ v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
+ data,
+ holder,
+ callee,
+ is_construct,
+ reinterpret_cast<void**>(&args[0] - 1),
+ args_length - 1);
+
+ HandleScope scope;
+ Object* result;
+ v8::Handle<v8::Value> value;
+ {
+ // Leaving JavaScript.
+ VMState state(EXTERNAL);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ state.set_external_callback(v8::ToCData<Address>(callback_obj));
+#endif
+ value = callback(new_args);
+ }
+ if (value.IsEmpty()) {
+ result = Heap::undefined_value();
+ } else {
+ result = *reinterpret_cast<Object**>(*value);
+ }
+
+ RETURN_IF_SCHEDULED_EXCEPTION();
+ return result;
+}
+
+
// Helper function to handle calls to non-function objects created through the
// API. The object can be called as either a constructor (using new) or just as
// a function (without new).
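
FastHandleApiCall assumes the caller appended four bookkeeping arguments after the user-visible ones, which is why args_length is computed as args.length() - 4. A hedged picture of the layout this builtin reads (inferred from the indexing above, not a documented contract):

    // args[0] .. args[args_length - 1]   JS-level arguments, receiver first
    // args[args_length + 0]              the called JSFunction
    // args[args_length + 1]              the v8::InvocationCallback
    // args[args_length + 2]              the call data object
    // args[args_length + 3]              the signature-checked holder
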
@@ -657,6 +1059,10 @@ static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
KeyedLoadIC::GeneratePreMonomorphic(masm);
}
+static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateIndexedInterceptor(masm);
+}
+
static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
StoreIC::GenerateInitialize(masm);
@@ -668,15 +1074,16 @@ static void Generate_StoreIC_Miss(MacroAssembler* masm) {
}
-static void Generate_StoreIC_ExtendStorage(MacroAssembler* masm) {
- StoreIC::GenerateExtendStorage(masm);
-}
-
static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
StoreIC::GenerateMegamorphic(masm);
}
+static void Generate_StoreIC_ArrayLength(MacroAssembler* masm) {
+ StoreIC::GenerateArrayLength(masm);
+}
+
+
static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
KeyedStoreIC::GenerateGeneric(masm);
}
@@ -720,11 +1127,6 @@ static void Generate_KeyedStoreIC_ExternalFloatArray(MacroAssembler* masm) {
}
-static void Generate_KeyedStoreIC_ExtendStorage(MacroAssembler* masm) {
- KeyedStoreIC::GenerateExtendStorage(masm);
-}
-
-
static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
KeyedStoreIC::GenerateMiss(masm);
}
@@ -869,9 +1271,6 @@ void Builtins::Setup(bool create_heap_objects) {
v8::internal::V8::FatalProcessOutOfMemory("CreateCode");
}
}
- // Add any unresolved jumps or calls to the fixup list in the
- // bootstrapper.
- Bootstrapper::AddFixup(Code::cast(code), &masm);
// Log the event and add the code to the builtins array.
LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code), functions[i].s_name));
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index 418948f75..595e9a4b8 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -48,8 +48,13 @@ enum BuiltinExtraArguments {
\
V(ArrayPush, NO_EXTRA_ARGUMENTS) \
V(ArrayPop, NO_EXTRA_ARGUMENTS) \
+ V(ArrayShift, NO_EXTRA_ARGUMENTS) \
+ V(ArrayUnshift, NO_EXTRA_ARGUMENTS) \
+ V(ArraySlice, NO_EXTRA_ARGUMENTS) \
+ V(ArraySplice, NO_EXTRA_ARGUMENTS) \
\
V(HandleApiCall, NEEDS_CALLED_FUNCTION) \
+ V(FastHandleApiCall, NO_EXTRA_ARGUMENTS) \
V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION) \
V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS) \
V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS)
@@ -69,9 +74,6 @@ enum BuiltinExtraArguments {
V(StoreIC_Miss, BUILTIN, UNINITIALIZED) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED) \
\
- V(StoreIC_ExtendStorage, BUILTIN, UNINITIALIZED) \
- V(KeyedStoreIC_ExtendStorage, BUILTIN, UNINITIALIZED) \
- \
V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED) \
V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC) \
V(LoadIC_Normal, LOAD_IC, MONOMORPHIC) \
@@ -91,8 +93,10 @@ enum BuiltinExtraArguments {
V(KeyedLoadIC_ExternalIntArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalUnsignedIntArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalFloatArray, KEYED_LOAD_IC, MEGAMORPHIC) \
+ V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC) \
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED) \
+ V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED) \
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index 3b0c85135..eeb748b4a 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -125,7 +125,9 @@ static inline void CheckEqualsHelper(const char* file,
const char* expected,
const char* value_source,
const char* value) {
- if (strcmp(expected, value) != 0) {
+ if ((expected == NULL && value != NULL) ||
+ (expected != NULL && value == NULL) ||
+ (expected != NULL && value != NULL && strcmp(expected, value) != 0)) {
V8_Fatal(file, line,
"CHECK_EQ(%s, %s) failed\n# Expected: %s\n# Found: %s",
expected_source, value_source, expected, value);
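
The old helper handed both strings straight to strcmp, which is undefined behavior if either is NULL; the new condition only compares contents when both pointers are non-NULL. The same guard in isolation:

    #include <cstring>

    // Equal if both NULL, or both non-NULL with identical contents;
    // strcmp is never reached with a NULL argument.
    bool CStringEquals(const char* expected, const char* value) {
      if (expected == NULL || value == NULL) return expected == value;
      return std::strcmp(expected, value) == 0;
    }
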
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 09581aa82..4d0fd2992 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -31,6 +31,7 @@
#include "code-stubs.h"
#include "factory.h"
#include "macro-assembler.h"
+#include "oprofile-agent.h"
namespace v8 {
namespace internal {
@@ -60,8 +61,12 @@ void CodeStub::GenerateCode(MacroAssembler* masm) {
void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
code->set_major_key(MajorKey());
- // Add unresolved entries in the code to the fixup list.
- Bootstrapper::AddFixup(code, masm);
+#ifdef ENABLE_OPROFILE_AGENT
+ // Register the generated stub with the OPROFILE agent.
+ OProfileAgent::CreateNativeCodeRegion(GetName(),
+ code->instruction_start(),
+ code->instruction_size());
+#endif
LOG(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
Counters::total_stubs_code_size.Increment(code->instruction_size());
@@ -149,13 +154,16 @@ Object* CodeStub::TryGetCode() {
}
-const char* CodeStub::MajorName(CodeStub::Major major_key) {
+const char* CodeStub::MajorName(CodeStub::Major major_key,
+ bool allow_unknown_keys) {
switch (major_key) {
#define DEF_CASE(name) case name: return #name;
CODE_STUB_LIST(DEF_CASE)
#undef DEF_CASE
default:
- UNREACHABLE();
+ if (!allow_unknown_keys) {
+ UNREACHABLE();
+ }
return NULL;
}
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 16267f64e..3901a6478 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -55,6 +55,7 @@ namespace internal {
V(CounterOp) \
V(ArgumentsAccess) \
V(RegExpExec) \
+ V(NumberToString) \
V(CEntry) \
V(JSEntry) \
V(DebuggerStatement)
@@ -100,7 +101,7 @@ class CodeStub BASE_EMBEDDED {
static int MinorKeyFromKey(uint32_t key) {
return MinorKeyBits::decode(key);
};
- static const char* MajorName(Major major_key);
+ static const char* MajorName(Major major_key, bool allow_unknown_keys);
virtual ~CodeStub() {}
@@ -138,7 +139,7 @@ class CodeStub BASE_EMBEDDED {
virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
// Returns a name for logging/debugging purposes.
- virtual const char* GetName() { return MajorName(MajorKey()); }
+ virtual const char* GetName() { return MajorName(MajorKey(), false); }
#ifdef DEBUG
virtual void Print() { PrintF("%s\n", GetName()); }
diff --git a/deps/v8/src/codegen-inl.h b/deps/v8/src/codegen-inl.h
index bee237d8c..da8cbf703 100644
--- a/deps/v8/src/codegen-inl.h
+++ b/deps/v8/src/codegen-inl.h
@@ -30,6 +30,7 @@
#define V8_CODEGEN_INL_H_
#include "codegen.h"
+#include "compiler.h"
#include "register-allocator-inl.h"
#if V8_TARGET_ARCH_IA32
@@ -38,6 +39,8 @@
#include "x64/codegen-x64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/codegen-arm-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/codegen-mips-inl.h"
#else
#error Unsupported target architecture.
#endif
@@ -46,42 +49,8 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
-
-// -----------------------------------------------------------------------------
-// Support for "structured" code comments.
-//
-// By selecting matching brackets in disassembler output,
-// code segments can be identified more easily.
-
-#ifdef DEBUG
-
-class Comment BASE_EMBEDDED {
- public:
- Comment(MacroAssembler* masm, const char* msg) : masm_(masm), msg_(msg) {
- __ RecordComment(msg);
- }
-
- ~Comment() {
- if (msg_[0] == '[') __ RecordComment("]");
- }
-
- private:
- MacroAssembler* masm_;
- const char* msg_;
-};
-
-#else
-
-class Comment BASE_EMBEDDED {
- public:
- Comment(MacroAssembler*, const char*) {}
-};
-
-#endif // DEBUG
-
-#undef __
-
+Handle<Script> CodeGenerator::script() { return info_->script(); }
+bool CodeGenerator::is_eval() { return info_->is_eval(); }
} } // namespace v8::internal
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index cb6089b8b..01ee6d072 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -31,6 +31,7 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
+#include "liveedit.h"
#include "oprofile-agent.h"
#include "prettyprinter.h"
#include "register-allocator-inl.h"
@@ -42,6 +43,24 @@
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm_)
+
+#ifdef DEBUG
+
+Comment::Comment(MacroAssembler* masm, const char* msg)
+ : masm_(masm), msg_(msg) {
+ __ RecordComment(msg);
+}
+
+
+Comment::~Comment() {
+ if (msg_[0] == '[') __ RecordComment("]");
+}
+
+#endif // DEBUG
+
+#undef __
+
CodeGenerator* CodeGeneratorScope::top_ = NULL;
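
Comment keeps its bracket-matching convention: a message starting with '[' opens a region that the destructor closes with a ']' comment, so regions nest in disassembler output. Typical use inside a code generator, sketched with a hypothetical visitor:

    void CodeGenerator::VisitExample(Example* node) {
      // Emits "[ Example" here and the matching "]" when cmnt is
      // destroyed at the end of the scope.
      Comment cmnt(masm_, "[ Example");
      // ... emit code for the node ...
    }
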
@@ -126,7 +145,7 @@ void CodeGenerator::DeleteFrame() {
}
-void CodeGenerator::MakeCodePrologue(FunctionLiteral* fun) {
+void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
#ifdef DEBUG
bool print_source = false;
bool print_ast = false;
@@ -147,60 +166,61 @@ void CodeGenerator::MakeCodePrologue(FunctionLiteral* fun) {
if (FLAG_trace_codegen || print_source || print_ast) {
PrintF("*** Generate code for %s function: ", ftype);
- fun->name()->ShortPrint();
+ info->function()->name()->ShortPrint();
PrintF(" ***\n");
}
if (print_source) {
- PrintF("--- Source from AST ---\n%s\n", PrettyPrinter().PrintProgram(fun));
+ PrintF("--- Source from AST ---\n%s\n",
+ PrettyPrinter().PrintProgram(info->function()));
}
if (print_ast) {
- PrintF("--- AST ---\n%s\n", AstPrinter().PrintProgram(fun));
+ PrintF("--- AST ---\n%s\n",
+ AstPrinter().PrintProgram(info->function()));
}
if (print_json_ast) {
JsonAstBuilder builder;
- PrintF("%s", builder.BuildProgram(fun));
+ PrintF("%s", builder.BuildProgram(info->function()));
}
#endif // DEBUG
}
-Handle<Code> CodeGenerator::MakeCodeEpilogue(FunctionLiteral* fun,
- MacroAssembler* masm,
+Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
Code::Flags flags,
- Handle<Script> script) {
+ CompilationInfo* info) {
// Allocate and install the code.
CodeDesc desc;
masm->GetCode(&desc);
- ZoneScopeInfo sinfo(fun->scope());
+ ZoneScopeInfo sinfo(info->scope());
Handle<Code> code =
Factory::NewCode(desc, &sinfo, flags, masm->CodeObject());
- // Add unresolved entries in the code to the fixup list.
- Bootstrapper::AddFixup(*code, masm);
-
#ifdef ENABLE_DISASSEMBLER
bool print_code = Bootstrapper::IsActive()
? FLAG_print_builtin_code
: FLAG_print_code;
if (print_code) {
// Print the source code if available.
+ Handle<Script> script = info->script();
+ FunctionLiteral* function = info->function();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
PrintF("--- Raw source ---\n");
StringInputBuffer stream(String::cast(script->source()));
- stream.Seek(fun->start_position());
+ stream.Seek(function->start_position());
// fun->end_position() points to the last character in the stream. We
// need to compensate by adding one to calculate the length.
- int source_len = fun->end_position() - fun->start_position() + 1;
+ int source_len =
+ function->end_position() - function->start_position() + 1;
for (int i = 0; i < source_len; i++) {
if (stream.has_more()) PrintF("%c", stream.GetNext());
}
PrintF("\n\n");
}
PrintF("--- Code ---\n");
- code->Disassemble(*fun->name()->ToCString());
+ code->Disassemble(*function->name()->ToCString());
}
#endif // ENABLE_DISASSEMBLER
@@ -214,21 +234,21 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(FunctionLiteral* fun,
// Generate the code. Takes a function literal, generates code for it, assemble
// all the pieces into a Code object. This function is only to be called by
// the compiler.cc code.
-Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
- Handle<Script> script,
- bool is_eval,
- CompilationInfo* info) {
+Handle<Code> CodeGenerator::MakeCode(CompilationInfo* info) {
+ LiveEditFunctionTracker live_edit_tracker(info->function());
+ Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
int len = String::cast(script->source())->length();
Counters::total_old_codegen_source_size.Increment(len);
}
- MakeCodePrologue(fun);
+ MakeCodePrologue(info);
// Generate code.
const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(NULL, kInitialBufferSize);
- CodeGenerator cgen(&masm, script, is_eval);
+ CodeGenerator cgen(&masm);
CodeGeneratorScope scope(&cgen);
- cgen.Generate(fun, PRIMARY, info);
+ live_edit_tracker.RecordFunctionScope(info->function()->scope());
+ cgen.Generate(info, PRIMARY);
if (cgen.HasStackOverflow()) {
ASSERT(!Top::has_pending_exception());
return Handle<Code>::null();
@@ -236,7 +256,9 @@ Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
InLoopFlag in_loop = (cgen.loop_nesting() != 0) ? IN_LOOP : NOT_IN_LOOP;
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
- return MakeCodeEpilogue(fun, cgen.masm(), flags, script);
+ Handle<Code> result = MakeCodeEpilogue(cgen.masm(), flags, info);
+ live_edit_tracker.RecordFunctionCode(result);
+ return result;
}
@@ -355,6 +377,7 @@ CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
{&CodeGenerator::GenerateSubString, "_SubString"},
{&CodeGenerator::GenerateStringCompare, "_StringCompare"},
{&CodeGenerator::GenerateRegExpExec, "_RegExpExec"},
+ {&CodeGenerator::GenerateNumberToString, "_NumberToString"},
};
@@ -506,10 +529,4 @@ void ApiGetterEntryStub::SetCustomCache(Code* value) {
}
-void DebuggerStatementStub::Generate(MacroAssembler* masm) {
- Runtime::Function* f = Runtime::FunctionForId(Runtime::kDebugBreak);
- masm->TailCallRuntime(ExternalReference(f), 0, f->result_size);
-}
-
-
} } // namespace v8::internal
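
The refactor above collapses the loose (literal, script, is_eval) parameter lists of MakeCodePrologue, MakeCode, and MakeCodeEpilogue into a single CompilationInfo*. A minimal standalone sketch of the same shape, with simplified types and hypothetical names (Info, MakePrologue, MakeEpilogue are stand-ins, not V8 API):

#include <string>

// Simplified stand-in for CompilationInfo: one object carries what the
// pipeline stages previously took as separate parameters.
struct Info {
  std::string source;
  bool is_eval;
  int loop_nesting;
};

static void MakePrologue(const Info& info) {
  // Tracing / AST printing would key off fields of info here.
  (void)info;
}

static std::string MakeEpilogue(const Info& info) {
  return "code for: " + info.source;
}

static std::string MakeCode(const Info& info) {
  MakePrologue(info);
  // ... generate code ...
  return MakeEpilogue(info);  // every stage sees the same context
}

int main() {
  Info info = {"function f() {}", false, 0};
  return MakeCode(info).empty() ? 1 : 0;
}
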
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index d0be5f1b1..5c10cb62c 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -31,6 +31,7 @@
#include "ast.h"
#include "code-stubs.h"
#include "runtime.h"
+#include "number-info.h"
// Include the declaration of the architecture defined class CodeGenerator.
// The contract to the shared code is that the CodeGenerator is a subclass
@@ -86,6 +87,8 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
#include "x64/codegen-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/codegen-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/codegen-mips.h"
#else
#error Unsupported target architecture.
#endif
@@ -96,6 +99,29 @@ namespace v8 {
namespace internal {
+// Support for "structured" code comments.
+#ifdef DEBUG
+
+class Comment BASE_EMBEDDED {
+ public:
+ Comment(MacroAssembler* masm, const char* msg);
+ ~Comment();
+
+ private:
+ MacroAssembler* masm_;
+ const char* msg_;
+};
+
+#else
+
+class Comment BASE_EMBEDDED {
+ public:
+ Comment(MacroAssembler*, const char*) {}
+};
+
+#endif // DEBUG
+
+
// Code generation can be nested. Code generation scopes form a stack
// of active code generators.
class CodeGeneratorScope BASE_EMBEDDED {
@@ -390,21 +416,6 @@ class ApiGetterEntryStub : public CodeStub {
};
-// Mark the debugger statement to be recognized by debugger (by the MajorKey)
-class DebuggerStatementStub : public CodeStub {
- public:
- DebuggerStatementStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return DebuggerStatement; }
- int MinorKey() { return 0; }
-
- const char* GetName() { return "DebuggerStatementStub"; }
-};
-
-
class JSEntryStub : public CodeStub {
public:
JSEntryStub() { }
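
The Comment class added to codegen.h above brackets a region of emitted code using RAII: the constructor records the opening text, the destructor records a closing marker, and the non-DEBUG variant compiles away to nothing. A rough standalone sketch of the pattern (ScopedComment and Assembler are hypothetical stand-ins):

#include <cstdio>

// Stand-in for the assembler's comment stream.
class Assembler {
 public:
  void RecordComment(const char* msg) { std::printf("%s\n", msg); }
};

// Scoped comment: emits the opening message on construction and a
// closing marker on destruction, so nested regions stay balanced.
class ScopedComment {
 public:
  ScopedComment(Assembler* masm, const char* msg) : masm_(masm) {
    masm_->RecordComment(msg);
  }
  ~ScopedComment() { masm_->RecordComment("]"); }

 private:
  Assembler* masm_;
};

int main() {
  Assembler masm;
  ScopedComment outer(&masm, "[ Function prologue");
  {
    ScopedComment inner(&masm, "[ Stack check");
  }  // inner's destructor emits the closing marker here
  return 0;
}
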
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index a5e1e5c88..557a91e4d 100755
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -38,20 +38,17 @@
#include "rewriter.h"
#include "scopes.h"
#include "usage-analyzer.h"
+#include "liveedit.h"
namespace v8 {
namespace internal {
-static Handle<Code> MakeCode(FunctionLiteral* literal,
- Handle<Script> script,
- Handle<Context> context,
- bool is_eval,
- CompilationInfo* info) {
- ASSERT(literal != NULL);
-
+static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
+ FunctionLiteral* function = info->function();
+ ASSERT(function != NULL);
// Rewrite the AST by introducing .result assignments where needed.
- if (!Rewriter::Process(literal) || !AnalyzeVariableUsage(literal)) {
+ if (!Rewriter::Process(function) || !AnalyzeVariableUsage(function)) {
// Signal a stack overflow by returning a null handle. The stack
// overflow exception will be thrown by the caller.
return Handle<Code>::null();
@@ -62,7 +59,7 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
// the top scope only contains the single lazily compiled function,
// so this doesn't re-allocate variables repeatedly.
HistogramTimerScope timer(&Counters::variable_allocation);
- Scope* top = literal->scope();
+ Scope* top = info->scope();
while (top->outer_scope() != NULL) top = top->outer_scope();
top->AllocateVariables(context);
}
@@ -71,12 +68,12 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
if (Bootstrapper::IsActive() ?
FLAG_print_builtin_scopes :
FLAG_print_scopes) {
- literal->scope()->Print();
+ info->scope()->Print();
}
#endif
// Optimize the AST.
- if (!Rewriter::Optimize(literal)) {
+ if (!Rewriter::Optimize(function)) {
// Signal a stack overflow by returning a null handle. The stack
// overflow exception will be thrown by the caller.
return Handle<Code>::null();
@@ -98,25 +95,25 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
Handle<SharedFunctionInfo> shared = info->shared_info();
bool is_run_once = (shared.is_null())
- ? literal->scope()->is_global_scope()
+ ? info->scope()->is_global_scope()
: (shared->is_toplevel() || shared->try_full_codegen());
if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
FullCodeGenSyntaxChecker checker;
- checker.Check(literal);
+ checker.Check(function);
if (checker.has_supported_syntax()) {
- return FullCodeGenerator::MakeCode(literal, script, is_eval);
+ return FullCodeGenerator::MakeCode(info);
}
} else if (FLAG_always_fast_compiler ||
(FLAG_fast_compiler && !is_run_once)) {
FastCodeGenSyntaxChecker checker;
- checker.Check(literal, info);
+ checker.Check(info);
if (checker.has_supported_syntax()) {
- return FastCodeGenerator::MakeCode(literal, script, is_eval, info);
+ return FastCodeGenerator::MakeCode(info);
}
}
- return CodeGenerator::MakeCode(literal, script, is_eval, info);
+ return CodeGenerator::MakeCode(info);
}
@@ -180,10 +177,8 @@ static Handle<JSFunction> MakeFunction(bool is_global,
HistogramTimerScope timer(rate);
// Compile the code.
- CompilationInfo info(Handle<SharedFunctionInfo>::null(),
- Handle<Object>::null(), // No receiver.
- 0); // Not nested in a loop.
- Handle<Code> code = MakeCode(lit, script, context, is_eval, &info);
+ CompilationInfo info(lit, script, is_eval);
+ Handle<Code> code = MakeCode(context, &info);
// Check for stack-overflow exceptions.
if (code.is_null()) {
@@ -243,7 +238,8 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source,
Handle<Object> script_name,
int line_offset, int column_offset,
v8::Extension* extension,
- ScriptDataImpl* input_pre_data) {
+ ScriptDataImpl* input_pre_data,
+ Handle<Object> script_data) {
int source_length = source->length();
Counters::total_load_size.Increment(source_length);
Counters::total_compile_size.Increment(source_length);
@@ -277,6 +273,9 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source,
script->set_column_offset(Smi::FromInt(column_offset));
}
+ script->set_data(script_data.is_null() ? Heap::undefined_value()
+ : *script_data);
+
// Compile the function and add it to the cache.
result = MakeFunction(true,
false,
@@ -355,7 +354,6 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
// Compute name, source code and script data.
Handle<SharedFunctionInfo> shared = info->shared_info();
Handle<String> name(String::cast(shared->name()));
- Handle<Script> script(Script::cast(shared->script()));
int start_position = shared->start_position();
int end_position = shared->end_position();
@@ -364,7 +362,8 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
// Generate the AST for the lazily compiled function. The AST may be
// NULL in case of parser stack overflow.
- FunctionLiteral* lit = MakeLazyAST(script, name,
+ FunctionLiteral* lit = MakeLazyAST(info->script(),
+ name,
start_position,
end_position,
is_expression);
@@ -374,6 +373,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
ASSERT(Top::has_pending_exception());
return false;
}
+ info->set_function(lit);
// Measure how long it takes to do the lazy compilation; only take
// the rest of the function into account to avoid overlap with the
@@ -381,11 +381,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
HistogramTimerScope timer(&Counters::compile_lazy);
// Compile the code.
- Handle<Code> code = MakeCode(lit,
- script,
- Handle<Context>::null(),
- false,
- info);
+ Handle<Code> code = MakeCode(Handle<Context>::null(), info);
// Check for stack-overflow exception.
if (code.is_null()) {
@@ -394,28 +390,12 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
}
#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
- // Log the code generation. If source information is available include script
- // name and line number. Check explicit whether logging is enabled as finding
- // the line number is not for free.
- if (Logger::is_logging() || OProfileAgent::is_enabled()) {
- Handle<String> func_name(name->length() > 0 ?
- *name : shared->inferred_name());
- if (script->name()->IsString()) {
- int line_num = GetScriptLineNumber(script, start_position) + 1;
- LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name,
- String::cast(script->name()), line_num));
- OProfileAgent::CreateNativeCodeRegion(*func_name,
- String::cast(script->name()),
- line_num,
- code->instruction_start(),
- code->instruction_size());
- } else {
- LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name));
- OProfileAgent::CreateNativeCodeRegion(*func_name,
- code->instruction_start(),
- code->instruction_size());
- }
- }
+ LogCodeCreateEvent(Logger::LAZY_COMPILE_TAG,
+ name,
+ Handle<String>(shared->inferred_name()),
+ start_position,
+ info->script(),
+ code);
#endif
// Update the shared function info with the compiled code.
@@ -450,7 +430,8 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
// compiled. These builtins cannot be handled lazily by the parser,
// since we have to know if a function uses the special natives
// syntax, which is something the parser records.
- bool allow_lazy = literal->AllowsLazyCompilation();
+ bool allow_lazy = literal->AllowsLazyCompilation() &&
+ !LiveEditFunctionTracker::IsActive();
// Generate code
Handle<Code> code;
@@ -466,9 +447,7 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
// Generate code and return it. The way that the compilation mode
// is controlled by the command-line flags is described in
// the static helper function MakeCode.
- CompilationInfo info(Handle<SharedFunctionInfo>::null(),
- Handle<Object>::null(), // No receiver.
- 0); // Not nested in a loop.
+ CompilationInfo info(literal, script, false);
CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
bool is_run_once = literal->try_full_codegen();
@@ -477,9 +456,7 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
FullCodeGenSyntaxChecker checker;
checker.Check(literal);
if (checker.has_supported_syntax()) {
- code = FullCodeGenerator::MakeCode(literal,
- script,
- false); // Not eval.
+ code = FullCodeGenerator::MakeCode(&info);
is_compiled = true;
}
} else if (FLAG_always_fast_compiler ||
@@ -487,19 +464,16 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
// Since we are not lazily compiling we do not have a receiver to
// specialize for.
FastCodeGenSyntaxChecker checker;
- checker.Check(literal, &info);
+ checker.Check(&info);
if (checker.has_supported_syntax()) {
- code = FastCodeGenerator::MakeCode(literal, script, false, &info);
+ code = FastCodeGenerator::MakeCode(&info);
is_compiled = true;
}
}
if (!is_compiled) {
// We fall back to the classic V8 code generator.
- code = CodeGenerator::MakeCode(literal,
- script,
- false, // Not eval.
- &info);
+ code = CodeGenerator::MakeCode(&info);
}
// Check for stack-overflow exception.
@@ -509,12 +483,14 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
}
// Function compilation complete.
- LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *literal->name()));
-#ifdef ENABLE_OPROFILE_AGENT
- OProfileAgent::CreateNativeCodeRegion(*literal->name(),
- code->instruction_start(),
- code->instruction_size());
+#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
+ LogCodeCreateEvent(Logger::FUNCTION_TAG,
+ literal->name(),
+ literal->inferred_name(),
+ literal->start_position(),
+ script,
+ code);
#endif
}
@@ -562,4 +538,35 @@ void Compiler::SetFunctionInfo(Handle<JSFunction> fun,
}
+#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
+void Compiler::LogCodeCreateEvent(Logger::LogEventsAndTags tag,
+ Handle<String> name,
+ Handle<String> inferred_name,
+ int start_position,
+ Handle<Script> script,
+ Handle<Code> code) {
+ // Log the code generation. If source information is available,
+ // include script name and line number. Check explicitly whether
+ // logging is enabled as finding the line number is not free.
+ if (Logger::is_logging() || OProfileAgent::is_enabled()) {
+ Handle<String> func_name(name->length() > 0 ? *name : *inferred_name);
+ if (script->name()->IsString()) {
+ int line_num = GetScriptLineNumber(script, start_position) + 1;
+ LOG(CodeCreateEvent(tag, *code, *func_name,
+ String::cast(script->name()), line_num));
+ OProfileAgent::CreateNativeCodeRegion(*func_name,
+ String::cast(script->name()),
+ line_num,
+ code->instruction_start(),
+ code->instruction_size());
+ } else {
+ LOG(CodeCreateEvent(tag, *code, *func_name));
+ OProfileAgent::CreateNativeCodeRegion(*func_name,
+ code->instruction_start(),
+ code->instruction_size());
+ }
+ }
+}
+#endif
+
} } // namespace v8::internal
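
The LogCodeCreateEvent helper above replaces two near-identical logging blocks in CompileLazy and BuildBoilerplate. A plain C++ sketch of the same consolidation, under the assumption that the only varying inputs are the tag, the name, and optional script/line information (all names here are hypothetical):

#include <cstdio>
#include <string>

// One helper replaces two near-identical call sites: it decides
// whether line information is available and logs accordingly.
static void LogCodeCreate(const std::string& tag,
                          const std::string& name,
                          const std::string& script_name,
                          int line) {
  if (!script_name.empty()) {
    std::printf("%s %s (%s:%d)\n", tag.c_str(), name.c_str(),
                script_name.c_str(), line);
  } else {
    std::printf("%s %s\n", tag.c_str(), name.c_str());
  }
}

int main() {
  LogCodeCreate("LazyCompile", "f", "app.js", 12);  // with source info
  LogCodeCreate("Function", "g", "", 0);            // without
  return 0;
}
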
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 19499de71..6ee2246b2 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -28,45 +28,136 @@
#ifndef V8_COMPILER_H_
#define V8_COMPILER_H_
+#include "ast.h"
#include "frame-element.h"
#include "parser.h"
+#include "register-allocator.h"
#include "zone.h"
namespace v8 {
namespace internal {
-// CompilationInfo encapsulates some information known at compile time.
+// CompilationInfo encapsulates some information known at compile time. It
+// is constructed based on the resources available at compile time.
class CompilationInfo BASE_EMBEDDED {
public:
- CompilationInfo(Handle<SharedFunctionInfo> shared_info,
- Handle<Object> receiver,
- int loop_nesting)
- : shared_info_(shared_info),
- receiver_(receiver),
+ // Lazy compilation of a JSFunction.
+ CompilationInfo(Handle<JSFunction> closure,
+ int loop_nesting,
+ Handle<Object> receiver)
+ : closure_(closure),
+ function_(NULL),
+ is_eval_(false),
loop_nesting_(loop_nesting),
- has_this_properties_(false),
- has_globals_(false) {
+ receiver_(receiver) {
+ Initialize();
+ ASSERT(!closure_.is_null() &&
+ shared_info_.is_null() &&
+ script_.is_null());
}
- Handle<SharedFunctionInfo> shared_info() { return shared_info_; }
+ // Lazy compilation based on SharedFunctionInfo.
+ explicit CompilationInfo(Handle<SharedFunctionInfo> shared_info)
+ : shared_info_(shared_info),
+ function_(NULL),
+ is_eval_(false),
+ loop_nesting_(0) {
+ Initialize();
+ ASSERT(closure_.is_null() &&
+ !shared_info_.is_null() &&
+ script_.is_null());
+ }
- bool has_receiver() { return !receiver_.is_null(); }
- Handle<Object> receiver() { return receiver_; }
+ // Eager compilation.
+ CompilationInfo(FunctionLiteral* literal, Handle<Script> script, bool is_eval)
+ : script_(script),
+ function_(literal),
+ is_eval_(is_eval),
+ loop_nesting_(0) {
+ Initialize();
+ ASSERT(closure_.is_null() &&
+ shared_info_.is_null() &&
+ !script_.is_null());
+ }
+
+ // We can only get a JSFunction if we actually have one.
+ Handle<JSFunction> closure() { return closure_; }
+ // We can get a SharedFunctionInfo from a JSFunction, or we may have
+ // been given one directly.
+ Handle<SharedFunctionInfo> shared_info() {
+ if (!closure().is_null()) {
+ return Handle<SharedFunctionInfo>(closure()->shared());
+ } else {
+ return shared_info_;
+ }
+ }
+
+ // We can always get a script. Either we have one or we can get a shared
+ // function info.
+ Handle<Script> script() {
+ if (!script_.is_null()) {
+ return script_;
+ } else {
+ ASSERT(shared_info()->script()->IsScript());
+ return Handle<Script>(Script::cast(shared_info()->script()));
+ }
+ }
+
+ // There should always be a function literal, but it may be set after
+ // construction (for lazy compilation).
+ FunctionLiteral* function() { return function_; }
+ void set_function(FunctionLiteral* literal) {
+ ASSERT(function_ == NULL);
+ function_ = literal;
+ }
+
+ // Simple accessors.
+ bool is_eval() { return is_eval_; }
int loop_nesting() { return loop_nesting_; }
+ bool has_receiver() { return !receiver_.is_null(); }
+ Handle<Object> receiver() { return receiver_; }
+ // Accessors for mutable fields (possibly set by analysis passes), with
+ // default values given by Initialize.
bool has_this_properties() { return has_this_properties_; }
void set_has_this_properties(bool flag) { has_this_properties_ = flag; }
+ bool has_global_object() {
+ return !closure().is_null() && (closure()->context()->global() != NULL);
+ }
+
+ GlobalObject* global_object() {
+ return has_global_object() ? closure()->context()->global() : NULL;
+ }
+
bool has_globals() { return has_globals_; }
void set_has_globals(bool flag) { has_globals_ = flag; }
+ // Derived accessors.
+ Scope* scope() { return function()->scope(); }
+
private:
+ void Initialize() {
+ has_this_properties_ = false;
+ has_globals_ = false;
+ }
+
+ Handle<JSFunction> closure_;
Handle<SharedFunctionInfo> shared_info_;
- Handle<Object> receiver_;
+ Handle<Script> script_;
+
+ FunctionLiteral* function_;
+
+ bool is_eval_;
int loop_nesting_;
+
+ Handle<Object> receiver_;
+
bool has_this_properties_;
bool has_globals_;
+
+ DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
@@ -94,7 +185,8 @@ class Compiler : public AllStatic {
Handle<Object> script_name,
int line_offset, int column_offset,
v8::Extension* extension,
- ScriptDataImpl* script_Data);
+ ScriptDataImpl* pre_data,
+ Handle<Object> script_data);
// Compile a String source within a context for Eval.
static Handle<JSFunction> CompileEval(Handle<String> source,
@@ -119,6 +211,17 @@ class Compiler : public AllStatic {
FunctionLiteral* lit,
bool is_toplevel,
Handle<Script> script);
+
+ private:
+
+#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
+ static void LogCodeCreateEvent(Logger::LogEventsAndTags tag,
+ Handle<String> name,
+ Handle<String> inferred_name,
+ int start_position,
+ Handle<Script> script,
+ Handle<Code> code);
+#endif
};
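
CompilationInfo above now has three construction modes (closure, shared info, or literal plus script), each asserting that exactly one source handle is set, with script() falling back through whichever source exists. A reduced sketch of that invariant pattern, using plain pointers instead of handles (all names hypothetical):

#include <cassert>

struct Script {};
struct Shared { Script* script; };
struct Closure { Shared* shared; };

class Info {
 public:
  // Lazy compilation of a closure.
  explicit Info(Closure* closure)
      : closure_(closure), shared_(nullptr), script_(nullptr) {
    assert(closure_ != nullptr);
  }
  // Lazy compilation from shared info only.
  explicit Info(Shared* shared)
      : closure_(nullptr), shared_(shared), script_(nullptr) {
    assert(shared_ != nullptr);
  }
  // Eager compilation: the script is known directly.
  explicit Info(Script* script)
      : closure_(nullptr), shared_(nullptr), script_(script) {
    assert(script_ != nullptr);
  }

  // Derived accessor: fall back through whichever source we have.
  Script* script() {
    if (script_ != nullptr) return script_;
    if (shared_ != nullptr) return shared_->script;
    return closure_->shared->script;
  }

 private:
  Closure* closure_;
  Shared* shared_;
  Script* script_;
};

int main() {
  Script s;
  Shared sh{&s};
  Closure c{&sh};
  Info a(&c), b(&sh), d(&s);
  return (a.script() == &s && b.script() == &s && d.script() == &s) ? 0 : 1;
}
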
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 66c157595..9baf07211 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -76,7 +76,6 @@ enum ContextLookupFlags {
V(TO_INTEGER_FUN_INDEX, JSFunction, to_integer_fun) \
V(TO_UINT32_FUN_INDEX, JSFunction, to_uint32_fun) \
V(TO_INT32_FUN_INDEX, JSFunction, to_int32_fun) \
- V(TO_BOOLEAN_FUN_INDEX, JSFunction, to_boolean_fun) \
V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
diff --git a/deps/v8/src/d8-readline.cc b/deps/v8/src/d8-readline.cc
index 34b7b60df..67fc9eff7 100644
--- a/deps/v8/src/d8-readline.cc
+++ b/deps/v8/src/d8-readline.cc
@@ -27,8 +27,8 @@
#include <cstdio> // NOLINT
-#include <readline/readline.h>
-#include <readline/history.h>
+#include <readline/readline.h> // NOLINT
+#include <readline/history.h> // NOLINT
#include "d8.h"
diff --git a/deps/v8/src/data-flow.cc b/deps/v8/src/data-flow.cc
index 0e30b3151..5e9d217d2 100644
--- a/deps/v8/src/data-flow.cc
+++ b/deps/v8/src/data-flow.cc
@@ -33,8 +33,9 @@ namespace v8 {
namespace internal {
-void AstLabeler::Label(FunctionLiteral* fun) {
- VisitStatements(fun->body());
+void AstLabeler::Label(CompilationInfo* info) {
+ info_ = info;
+ VisitStatements(info_->function()->body());
}
@@ -162,6 +163,10 @@ void AstLabeler::VisitSlot(Slot* expr) {
void AstLabeler::VisitVariableProxy(VariableProxy* expr) {
expr->set_num(next_number_++);
+ Variable* var = expr->var();
+ if (var->is_global() && !var->is_this()) {
+ info_->set_has_globals(true);
+ }
}
@@ -194,15 +199,11 @@ void AstLabeler::VisitCatchExtensionObject(
void AstLabeler::VisitAssignment(Assignment* expr) {
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
- if (prop != NULL) {
- ASSERT(prop->key()->IsPropertyName());
- VariableProxy* proxy = prop->obj()->AsVariableProxy();
- if (proxy != NULL && proxy->var()->is_this()) {
- has_this_properties_ = true;
- } else {
- Visit(prop->obj());
- }
- }
+ ASSERT(prop->key()->IsPropertyName());
+ VariableProxy* proxy = prop->obj()->AsVariableProxy();
+ USE(proxy);
+ ASSERT(proxy != NULL && proxy->var()->is_this());
+ info()->set_has_this_properties(true);
Visit(expr->value());
expr->set_num(next_number_++);
}
@@ -214,7 +215,12 @@ void AstLabeler::VisitThrow(Throw* expr) {
void AstLabeler::VisitProperty(Property* expr) {
- UNREACHABLE();
+ ASSERT(expr->key()->IsPropertyName());
+ VariableProxy* proxy = expr->obj()->AsVariableProxy();
+ USE(proxy);
+ ASSERT(proxy != NULL && proxy->var()->is_this());
+ info()->set_has_this_properties(true);
+ expr->set_num(next_number_++);
}
@@ -264,4 +270,292 @@ void AstLabeler::VisitDeclaration(Declaration* decl) {
UNREACHABLE();
}
+
+ZoneList<Expression*>* VarUseMap::Lookup(Variable* var) {
+ HashMap::Entry* entry = HashMap::Lookup(var, var->name()->Hash(), true);
+ if (entry->value == NULL) {
+ entry->value = new ZoneList<Expression*>(1);
+ }
+ return reinterpret_cast<ZoneList<Expression*>*>(entry->value);
+}
+
+
+void LivenessAnalyzer::Analyze(FunctionLiteral* fun) {
+ // Process the function body.
+ VisitStatements(fun->body());
+
+ // All variables are implicitly defined at the function start.
+ // Record a definition of all variables live at function entry.
+ for (HashMap::Entry* p = live_vars_.Start();
+ p != NULL;
+ p = live_vars_.Next(p)) {
+ Variable* var = reinterpret_cast<Variable*>(p->key);
+ RecordDef(var, fun);
+ }
+}
+
+
+void LivenessAnalyzer::VisitStatements(ZoneList<Statement*>* stmts) {
+ // Visit statements right-to-left.
+ for (int i = stmts->length() - 1; i >= 0; i--) {
+ Visit(stmts->at(i));
+ }
+}
+
+
+void LivenessAnalyzer::RecordUse(Variable* var, Expression* expr) {
+ ASSERT(var->is_global() || var->is_this());
+ ZoneList<Expression*>* uses = live_vars_.Lookup(var);
+ uses->Add(expr);
+}
+
+
+void LivenessAnalyzer::RecordDef(Variable* var, Expression* expr) {
+ ASSERT(var->is_global() || var->is_this());
+
+ // We do not support other expressions that can define variables.
+ ASSERT(expr->AsFunctionLiteral() != NULL);
+
+ // Add the variable to the list of defined variables.
+ if (expr->defined_vars() == NULL) {
+ expr->set_defined_vars(new ZoneList<DefinitionInfo*>(1));
+ }
+ DefinitionInfo* def = new DefinitionInfo();
+ expr->AsFunctionLiteral()->defined_vars()->Add(def);
+
+ // Compute the last use of the definition. The variable uses are
+ // inserted in reverse evaluation order. The first element
+ // in the list of live uses is the last use.
+ ZoneList<Expression*>* uses = live_vars_.Lookup(var);
+ while (uses->length() > 0) {
+ Expression* use_site = uses->RemoveLast();
+ use_site->set_var_def(def);
+ if (uses->length() == 0) {
+ def->set_last_use(use_site);
+ }
+ }
+}
+
+
+// Visitor functions for live variable analysis.
+void LivenessAnalyzer::VisitDeclaration(Declaration* decl) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitBlock(Block* stmt) {
+ VisitStatements(stmt->statements());
+}
+
+
+void LivenessAnalyzer::VisitExpressionStatement(
+ ExpressionStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void LivenessAnalyzer::VisitEmptyStatement(EmptyStatement* stmt) {
+ // Do nothing.
+}
+
+
+void LivenessAnalyzer::VisitIfStatement(IfStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitContinueStatement(ContinueStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitBreakStatement(BreakStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitReturnStatement(ReturnStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitWithEnterStatement(
+ WithEnterStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitWithExitStatement(WithExitStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitSwitchStatement(SwitchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitWhileStatement(WhileStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitForStatement(ForStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitForInStatement(ForInStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitTryFinallyStatement(
+ TryFinallyStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitDebuggerStatement(
+ DebuggerStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitFunctionLiteral(FunctionLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitConditional(Conditional* expr) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitSlot(Slot* expr) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitVariableProxy(VariableProxy* expr) {
+ Variable* var = expr->var();
+ ASSERT(var->is_global());
+ ASSERT(!var->is_this());
+ RecordUse(var, expr);
+}
+
+
+void LivenessAnalyzer::VisitLiteral(Literal* expr) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitRegExpLiteral(RegExpLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitObjectLiteral(ObjectLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitArrayLiteral(ArrayLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitCatchExtensionObject(
+ CatchExtensionObject* expr) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitAssignment(Assignment* expr) {
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->IsPropertyName());
+ VariableProxy* proxy = prop->obj()->AsVariableProxy();
+ ASSERT(proxy != NULL && proxy->var()->is_this());
+
+ // Record use of this at the assignment node. Assignments to
+ // this-properties are treated like unary operations.
+ RecordUse(proxy->var(), expr);
+
+ // Visit right-hand side.
+ Visit(expr->value());
+}
+
+
+void LivenessAnalyzer::VisitThrow(Throw* expr) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitProperty(Property* expr) {
+ ASSERT(expr->key()->IsPropertyName());
+ VariableProxy* proxy = expr->obj()->AsVariableProxy();
+ ASSERT(proxy != NULL && proxy->var()->is_this());
+ RecordUse(proxy->var(), expr);
+}
+
+
+void LivenessAnalyzer::VisitCall(Call* expr) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitCallNew(CallNew* expr) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitCallRuntime(CallRuntime* expr) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitUnaryOperation(UnaryOperation* expr) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitCountOperation(CountOperation* expr) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitBinaryOperation(BinaryOperation* expr) {
+ // Visit child nodes in reverse evaluation order.
+ Visit(expr->right());
+ Visit(expr->left());
+}
+
+
+void LivenessAnalyzer::VisitCompareOperation(CompareOperation* expr) {
+ UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitThisFunction(ThisFunction* expr) {
+ UNREACHABLE();
+}
+
+
} } // namespace v8::internal
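
LivenessAnalyzer above visits statements right-to-left, so the first use it records for a variable is the last use in evaluation order; RecordDef then drains the use list and marks that final use. A standalone sketch of computing last-use positions the same way, over a flat list of variable uses (purely illustrative, not V8 code):

#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

int main() {
  // Each entry names the variable an instruction uses.
  std::vector<std::string> uses = {"a", "b", "a", "c", "b"};
  std::unordered_map<std::string, int> last_use;

  // Walk right-to-left; the first time we see a variable is its
  // last use in evaluation order.
  for (int i = static_cast<int>(uses.size()) - 1; i >= 0; i--) {
    if (last_use.find(uses[i]) == last_use.end()) {
      last_use[uses[i]] = i;
    }
  }
  for (const auto& p : last_use) {
    std::printf("last use of %s: %d\n", p.first.c_str(), p.second);
  }
  return 0;  // expected: a -> 2, b -> 4, c -> 3
}
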
diff --git a/deps/v8/src/data-flow.h b/deps/v8/src/data-flow.h
index ac8350318..233194440 100644
--- a/deps/v8/src/data-flow.h
+++ b/deps/v8/src/data-flow.h
@@ -29,7 +29,7 @@
#define V8_DATAFLOW_H_
#include "ast.h"
-#include "scopes.h"
+#include "compiler.h"
namespace v8 {
namespace internal {
@@ -38,13 +38,13 @@ namespace internal {
// their evaluation order (post-order left-to-right traversal).
class AstLabeler: public AstVisitor {
public:
- AstLabeler() : next_number_(0), has_this_properties_(false) {}
+ AstLabeler() : next_number_(0) {}
- void Label(FunctionLiteral* fun);
-
- bool has_this_properties() { return has_this_properties_; }
+ void Label(CompilationInfo* info);
private:
+ CompilationInfo* info() { return info_; }
+
void VisitDeclarations(ZoneList<Declaration*>* decls);
void VisitStatements(ZoneList<Statement*>* stmts);
@@ -56,12 +56,62 @@ class AstLabeler: public AstVisitor {
// Traversal number for labelling AST nodes.
int next_number_;
- bool has_this_properties_;
+ CompilationInfo* info_;
DISALLOW_COPY_AND_ASSIGN(AstLabeler);
};
+class VarUseMap : public HashMap {
+ public:
+ VarUseMap() : HashMap(VarMatch) {}
+
+ ZoneList<Expression*>* Lookup(Variable* var);
+
+ private:
+ static bool VarMatch(void* key1, void* key2) { return key1 == key2; }
+};
+
+
+class DefinitionInfo : public ZoneObject {
+ public:
+ explicit DefinitionInfo() : last_use_(NULL) {}
+
+ Expression* last_use() { return last_use_; }
+ void set_last_use(Expression* expr) { last_use_ = expr; }
+
+ private:
+ Expression* last_use_;
+ Register location_;
+};
+
+
+class LivenessAnalyzer : public AstVisitor {
+ public:
+ LivenessAnalyzer() {}
+
+ void Analyze(FunctionLiteral* fun);
+
+ private:
+ void VisitStatements(ZoneList<Statement*>* stmts);
+
+ void RecordUse(Variable* var, Expression* expr);
+ void RecordDef(Variable* var, Expression* expr);
+
+
+ // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ // Map for tracking the live variables.
+ VarUseMap live_vars_;
+
+ DISALLOW_COPY_AND_ASSIGN(LivenessAnalyzer);
+};
+
+
} } // namespace v8::internal
+
#endif // V8_DATAFLOW_H_
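
VarUseMap::Lookup above creates the per-variable use list on first access by passing insert=true to HashMap::Lookup. The equivalent create-on-miss idiom in standard C++, with stand-in Variable and Expression types (hypothetical, for illustration only):

#include <unordered_map>
#include <vector>

struct Variable {};
struct Expression {};

class VarUseMap {
 public:
  // Returns the use list for var, creating it on first lookup,
  // mirroring HashMap::Lookup(key, hash, /*insert=*/true) above.
  std::vector<Expression*>& Lookup(Variable* var) {
    return map_[var];  // operator[] default-constructs on miss
  }

 private:
  std::unordered_map<Variable*, std::vector<Expression*>> map_;
};

int main() {
  Variable v;
  Expression e;
  VarUseMap uses;
  uses.Lookup(&v).push_back(&e);  // list created lazily here
  return uses.Lookup(&v).size() == 1 ? 0 : 1;
}
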
diff --git a/deps/v8/src/debug-delay.js b/deps/v8/src/debug-delay.js
index 14d8c8830..754ac5d04 100644
--- a/deps/v8/src/debug-delay.js
+++ b/deps/v8/src/debug-delay.js
@@ -1934,10 +1934,14 @@ DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
if (isNaN(modules)) {
return response.failed('Modules is not an integer');
}
+ var tag = parseInt(request.arguments.tag);
+ if (isNaN(tag)) {
+ tag = 0;
+ }
if (request.arguments.command == 'resume') {
- %ProfilerResume(modules);
+ %ProfilerResume(modules, tag);
} else if (request.arguments.command == 'pause') {
- %ProfilerPause(modules);
+ %ProfilerPause(modules, tag);
} else {
return response.failed('Unknown command');
}
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index fb9b23eb5..8c4f51d95 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -31,6 +31,7 @@
#include "arguments.h"
#include "bootstrapper.h"
#include "code-stubs.h"
+#include "codegen.h"
#include "compilation-cache.h"
#include "compiler.h"
#include "debug.h"
@@ -453,15 +454,7 @@ void BreakLocationIterator::ClearDebugBreakAtIC() {
bool BreakLocationIterator::IsDebuggerStatement() {
- if (RelocInfo::IsCodeTarget(rmode())) {
- Address target = original_rinfo()->target_address();
- Code* code = Code::GetCodeFromTargetAddress(target);
- if (code->kind() == Code::STUB) {
- CodeStub::Major major_key = code->major_key();
- return (major_key == CodeStub::DebuggerStatement);
- }
- }
- return false;
+ return RelocInfo::DEBUG_BREAK == rmode();
}
@@ -690,7 +683,8 @@ bool Debug::CompileDebuggerScript(int index) {
bool allow_natives_syntax = FLAG_allow_natives_syntax;
FLAG_allow_natives_syntax = true;
Handle<JSFunction> boilerplate;
- boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL);
+ boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL,
+ Handle<String>::null());
FLAG_allow_natives_syntax = allow_natives_syntax;
// Silently ignore stack overflows during compilation.
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 50f3eb996..8473cd9f9 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -261,7 +261,7 @@ static int DecodeIt(FILE* f,
ASSERT(code->major_key() == CodeStub::MajorKeyFromKey(key));
out.AddFormatted(" %s, %s, ",
Code::Kind2String(kind),
- CodeStub::MajorName(code->major_key()));
+ CodeStub::MajorName(code->major_key(), false));
switch (code->major_key()) {
case CodeStub::CallFunction:
out.AddFormatted("argc = %d", minor_key);
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index a79af2373..20684136c 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -91,7 +91,7 @@ static Handle<Object> Invoke(bool construct,
JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
// Call the function through the right JS entry stub.
- byte* entry_address= func->code()->entry();
+ byte* entry_address = func->code()->entry();
JSFunction* function = *func;
Object* receiver_pointer = *receiver;
value = CALL_GENERATED_CODE(entry, entry_address, function,
diff --git a/deps/v8/src/fast-codegen.cc b/deps/v8/src/fast-codegen.cc
index 4e6f259c6..ecd26527c 100644
--- a/deps/v8/src/fast-codegen.cc
+++ b/deps/v8/src/fast-codegen.cc
@@ -51,8 +51,7 @@ namespace internal {
} while (false)
-void FastCodeGenSyntaxChecker::Check(FunctionLiteral* fun,
- CompilationInfo* info) {
+void FastCodeGenSyntaxChecker::Check(CompilationInfo* info) {
info_ = info;
// We do not specialize if we do not have a receiver or if it is not a
@@ -64,7 +63,7 @@ void FastCodeGenSyntaxChecker::Check(FunctionLiteral* fun,
// We do not support stack or heap slots (both of which require
// allocation).
- Scope* scope = fun->scope();
+ Scope* scope = info->scope();
if (scope->num_stack_slots() > 0) {
BAILOUT("Function has stack-allocated locals");
}
@@ -76,8 +75,10 @@ void FastCodeGenSyntaxChecker::Check(FunctionLiteral* fun,
CHECK_BAILOUT;
// We do not support empty function bodies.
- if (fun->body()->is_empty()) BAILOUT("Function has an empty body");
- VisitStatements(fun->body());
+ if (info->function()->body()->is_empty()) {
+ BAILOUT("Function has an empty body");
+ }
+ VisitStatements(info->function()->body());
}
@@ -88,10 +89,10 @@ void FastCodeGenSyntaxChecker::VisitDeclarations(
void FastCodeGenSyntaxChecker::VisitStatements(ZoneList<Statement*>* stmts) {
- for (int i = 0, len = stmts->length(); i < len; i++) {
- Visit(stmts->at(i));
- CHECK_BAILOUT;
+ if (stmts->length() != 1) {
+ BAILOUT("Function body is not a singleton statement.");
}
+ Visit(stmts->at(0));
}
@@ -213,7 +214,24 @@ void FastCodeGenSyntaxChecker::VisitSlot(Slot* expr) {
void FastCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) {
// Only global variable references are supported.
Variable* var = expr->var();
- if (!var->is_global()) BAILOUT("Non-global variable");
+ if (!var->is_global() || var->is_this()) BAILOUT("Non-global variable");
+
+ // Check that the global variable exists and is non-deletable.
+ if (info()->has_global_object()) {
+ LookupResult lookup;
+ info()->global_object()->Lookup(*expr->name(), &lookup);
+ if (!lookup.IsProperty()) {
+ BAILOUT("Non-existing global variable");
+ }
+ // We do not handle global variables with accessors or interceptors.
+ if (lookup.type() != NORMAL) {
+ BAILOUT("Global variable with accessors or interceptors.");
+ }
+ // We do not handle deletable global variables.
+ if (!lookup.IsDontDelete()) {
+ BAILOUT("Deletable global variable");
+ }
+ }
}
@@ -266,6 +284,9 @@ void FastCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) {
Handle<String> name = Handle<String>::cast(key->handle());
LookupResult lookup;
receiver->Lookup(*name, &lookup);
+ if (!lookup.IsProperty()) {
+ BAILOUT("Assigned property not found at compile time");
+ }
if (lookup.holder() != *receiver) BAILOUT("Non-own property assignment");
if (lookup.type() != FIELD) BAILOUT("Non-field property assignment");
} else {
@@ -283,7 +304,33 @@ void FastCodeGenSyntaxChecker::VisitThrow(Throw* expr) {
void FastCodeGenSyntaxChecker::VisitProperty(Property* expr) {
- BAILOUT("Property");
+ // We support named this property references.
+ VariableProxy* proxy = expr->obj()->AsVariableProxy();
+ if (proxy == NULL || !proxy->var()->is_this()) {
+ BAILOUT("Non-this-property reference");
+ }
+ if (!expr->key()->IsPropertyName()) {
+ BAILOUT("Non-named-property reference");
+ }
+
+ // We will only specialize for fields on the object itself.
+ // Expression::IsPropertyName implies that the name is a literal
+ // symbol but we do not assume that.
+ Literal* key = expr->key()->AsLiteral();
+ if (key != NULL && key->handle()->IsString()) {
+ Handle<Object> receiver = info()->receiver();
+ Handle<String> name = Handle<String>::cast(key->handle());
+ LookupResult lookup;
+ receiver->Lookup(*name, &lookup);
+ if (!lookup.IsProperty()) {
+ BAILOUT("Referenced property not found at compile time");
+ }
+ if (lookup.holder() != *receiver) BAILOUT("Non-own property reference");
+ if (lookup.type() != FIELD) BAILOUT("Non-field property reference");
+ } else {
+ UNREACHABLE();
+ BAILOUT("Unexpected non-string-literal property key");
+ }
}
@@ -313,7 +360,58 @@ void FastCodeGenSyntaxChecker::VisitCountOperation(CountOperation* expr) {
void FastCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) {
- BAILOUT("BinaryOperation");
+ // We support bitwise OR.
+ switch (expr->op()) {
+ case Token::COMMA:
+ BAILOUT("BinaryOperation COMMA");
+ case Token::OR:
+ BAILOUT("BinaryOperation OR");
+ case Token::AND:
+ BAILOUT("BinaryOperation AND");
+
+ case Token::BIT_OR:
+ // We support expressions nested on the left because they only require
+ // a pair of registers to keep all intermediate values in registers
+ // (i.e., the expression stack has height no more than two).
+ if (!expr->right()->IsLeaf()) BAILOUT("expression nested on right");
+
+ // We do not allow subexpressions with side effects because we
+ // (currently) bail out to the beginning of the full function. The
+ // only expressions with side effects that we would otherwise handle
+ // are assignments.
+ if (expr->left()->AsAssignment() != NULL ||
+ expr->right()->AsAssignment() != NULL) {
+ BAILOUT("subexpression of binary operation has side effects");
+ }
+
+ Visit(expr->left());
+ CHECK_BAILOUT;
+ Visit(expr->right());
+ break;
+
+ case Token::BIT_XOR:
+ BAILOUT("BinaryOperation BIT_XOR");
+ case Token::BIT_AND:
+ BAILOUT("BinaryOperation BIT_AND");
+ case Token::SHL:
+ BAILOUT("BinaryOperation SHL");
+ case Token::SAR:
+ BAILOUT("BinaryOperation SAR");
+ case Token::SHR:
+ BAILOUT("BinaryOperation SHR");
+ case Token::ADD:
+ BAILOUT("BinaryOperation ADD");
+ case Token::SUB:
+ BAILOUT("BinaryOperation SUB");
+ case Token::MUL:
+ BAILOUT("BinaryOperation MUL");
+ case Token::DIV:
+ BAILOUT("BinaryOperation DIV");
+ case Token::MOD:
+ BAILOUT("BinaryOperation MOD");
+ default:
+ UNREACHABLE();
+ }
}
@@ -332,24 +430,23 @@ void FastCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
#define __ ACCESS_MASM(masm())
-Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
- Handle<Script> script,
- bool is_eval,
- CompilationInfo* info) {
+Handle<Code> FastCodeGenerator::MakeCode(CompilationInfo* info) {
// Label the AST before calling MakeCodePrologue, so AST node numbers are
// printed with the AST.
AstLabeler labeler;
- labeler.Label(fun);
- info->set_has_this_properties(labeler.has_this_properties());
+ labeler.Label(info);
+
+ LivenessAnalyzer analyzer;
+ analyzer.Analyze(info->function());
- CodeGenerator::MakeCodePrologue(fun);
+ CodeGenerator::MakeCodePrologue(info);
const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(NULL, kInitialBufferSize);
// Generate the fast-path code.
- FastCodeGenerator fast_cgen(&masm, script, is_eval);
- fast_cgen.Generate(fun, info);
+ FastCodeGenerator fast_cgen(&masm);
+ fast_cgen.Generate(info);
if (fast_cgen.HasStackOverflow()) {
ASSERT(!Top::has_pending_exception());
return Handle<Code>::null();
@@ -357,16 +454,16 @@ Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
// Generate the full code for the function in bailout mode, using the same
// macro assembler.
- CodeGenerator cgen(&masm, script, is_eval);
+ CodeGenerator cgen(&masm);
CodeGeneratorScope scope(&cgen);
- cgen.Generate(fun, CodeGenerator::SECONDARY, info);
+ cgen.Generate(info, CodeGenerator::SECONDARY);
if (cgen.HasStackOverflow()) {
ASSERT(!Top::has_pending_exception());
return Handle<Code>::null();
}
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
- return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script);
+ return CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
}
@@ -483,12 +580,28 @@ void FastCodeGenerator::VisitSlot(Slot* expr) {
void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
ASSERT(expr->var()->is_global() && !expr->var()->is_this());
- Comment cmnt(masm(), ";; Global");
- if (FLAG_print_ir) {
- SmartPointer<char> name = expr->name()->ToCString();
- PrintF("%d: t%d = Global(%s)\n", expr->num(), expr->num(), *name);
+ // Check if we can compile a global variable load directly from the cell.
+ ASSERT(info()->has_global_object());
+ LookupResult lookup;
+ info()->global_object()->Lookup(*expr->name(), &lookup);
+ // We only support normal (non-accessor/interceptor) DontDelete properties
+ // for now.
+ ASSERT(lookup.IsProperty());
+ ASSERT_EQ(NORMAL, lookup.type());
+ ASSERT(lookup.IsDontDelete());
+ Handle<Object> cell(info()->global_object()->GetPropertyCell(&lookup));
+
+ // Global variable lookups do not have side effects, so we do not need to
+ // emit code if we are in an effect context.
+ if (!destination().is(no_reg)) {
+ Comment cmnt(masm(), ";; Global");
+ if (FLAG_print_ir) {
+ SmartPointer<char> name = expr->name()->ToCString();
+ PrintF("%d: t%d = Global(%s) // last_use = %d\n", expr->num(),
+ expr->num(), *name, expr->var_def()->last_use()->num());
+ }
+ EmitGlobalVariableLoad(cell);
}
- EmitGlobalVariableLoad(expr->name());
}
@@ -518,8 +631,13 @@ void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
void FastCodeGenerator::VisitAssignment(Assignment* expr) {
- // Known to be a simple this property assignment.
- Visit(expr->value());
+ // Known to be a simple this property assignment. Effectively a unary
+ // operation.
+ { Register my_destination = destination();
+ set_destination(accumulator0());
+ Visit(expr->value());
+ set_destination(my_destination);
+ }
Property* prop = expr->target()->AsProperty();
ASSERT_NOT_NULL(prop);
@@ -529,11 +647,14 @@ void FastCodeGenerator::VisitAssignment(Assignment* expr) {
Handle<String> name =
Handle<String>::cast(prop->key()->AsLiteral()->handle());
- Comment cmnt(masm(), ";; Store(this)");
+ Comment cmnt(masm(), ";; Store to this");
if (FLAG_print_ir) {
SmartPointer<char> name_string = name->ToCString();
- PrintF("%d: t%d = Store(this, \"%s\", t%d)\n",
- expr->num(), expr->num(), *name_string, expr->value()->num());
+ PrintF("%d: ", expr->num());
+ if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
+ PrintF("Store(this, \"%s\", t%d) // last_use(this) = %d\n", *name_string,
+ expr->value()->num(),
+ expr->var_def()->last_use()->num());
}
EmitThisPropertyStore(name);
@@ -546,7 +667,22 @@ void FastCodeGenerator::VisitThrow(Throw* expr) {
void FastCodeGenerator::VisitProperty(Property* expr) {
- UNREACHABLE();
+ ASSERT_NOT_NULL(expr->obj()->AsVariableProxy());
+ ASSERT(expr->obj()->AsVariableProxy()->var()->is_this());
+ ASSERT(expr->key()->IsPropertyName());
+ if (!destination().is(no_reg)) {
+ Handle<String> name =
+ Handle<String>::cast(expr->key()->AsLiteral()->handle());
+
+ Comment cmnt(masm(), ";; Load from this");
+ if (FLAG_print_ir) {
+ SmartPointer<char> name_string = name->ToCString();
+ PrintF("%d: t%d = Load(this, \"%s\") // last_use(this) = %d\n",
+ expr->num(), expr->num(), *name_string,
+ expr->var_def()->last_use()->num());
+ }
+ EmitThisPropertyLoad(name);
+ }
}
@@ -576,7 +712,26 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- UNREACHABLE();
+ // We support limited binary operations: bitwise OR only allowed to be
+ // nested on the left.
+ ASSERT(expr->op() == Token::BIT_OR);
+ ASSERT(expr->right()->IsLeaf());
+
+ { Register my_destination = destination();
+ set_destination(accumulator1());
+ Visit(expr->left());
+ set_destination(accumulator0());
+ Visit(expr->right());
+ set_destination(my_destination);
+ }
+
+ Comment cmnt(masm(), ";; BIT_OR");
+ if (FLAG_print_ir) {
+ PrintF("%d: ", expr->num());
+ if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
+ PrintF("BIT_OR(t%d, t%d)\n", expr->left()->num(), expr->right()->num());
+ }
+ EmitBitOr();
}
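
VisitAssignment and VisitBinaryOperation above follow a save/set/restore protocol for the destination register: the parent stashes its destination, redirects each child into a fixed accumulator, and restores the destination before emitting the combining operation. A standalone sketch with integers standing in for registers (Gen, kAcc0, kAcc1 are hypothetical):

#include <cstdio>

class Gen {
 public:
  int destination() const { return destination_; }
  void set_destination(int reg) { destination_ = reg; }

  // Children evaluate into fixed accumulators; the parent's
  // destination is restored before the combining operation.
  void VisitBitOr() {
    int saved = destination();
    set_destination(kAcc1);
    std::printf("eval left into r%d\n", destination());
    set_destination(kAcc0);
    std::printf("eval right into r%d\n", destination());
    set_destination(saved);
    std::printf("or r%d, r%d -> r%d\n", kAcc1, kAcc0, destination());
  }

 private:
  enum { kAcc0 = 0, kAcc1 = 1 };
  int destination_ = 2;
};

int main() {
  Gen g;
  g.VisitBitOr();
  return 0;
}
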
diff --git a/deps/v8/src/fast-codegen.h b/deps/v8/src/fast-codegen.h
index b40f6fb7f..96ee5dddb 100644
--- a/deps/v8/src/fast-codegen.h
+++ b/deps/v8/src/fast-codegen.h
@@ -42,7 +42,7 @@ class FastCodeGenSyntaxChecker: public AstVisitor {
: info_(NULL), has_supported_syntax_(true) {
}
- void Check(FunctionLiteral* fun, CompilationInfo* info);
+ void Check(CompilationInfo* info);
CompilationInfo* info() { return info_; }
bool has_supported_syntax() { return has_supported_syntax_; }
@@ -65,62 +65,86 @@ class FastCodeGenSyntaxChecker: public AstVisitor {
class FastCodeGenerator: public AstVisitor {
public:
- FastCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
- : masm_(masm),
- script_(script),
- is_eval_(is_eval),
- function_(NULL),
- info_(NULL) {
+ explicit FastCodeGenerator(MacroAssembler* masm)
+ : masm_(masm), info_(NULL), destination_(no_reg), smi_bits_(0) {
}
- static Handle<Code> MakeCode(FunctionLiteral* fun,
- Handle<Script> script,
- bool is_eval,
- CompilationInfo* info);
+ static Handle<Code> MakeCode(CompilationInfo* info);
- void Generate(FunctionLiteral* fun, CompilationInfo* info);
+ void Generate(CompilationInfo* compilation_info);
private:
MacroAssembler* masm() { return masm_; }
- FunctionLiteral* function() { return function_; }
+ CompilationInfo* info() { return info_; }
Label* bailout() { return &bailout_; }
- bool has_receiver() { return !info_->receiver().is_null(); }
- Handle<Object> receiver() { return info_->receiver(); }
- bool has_this_properties() { return info_->has_this_properties(); }
+ Register destination() { return destination_; }
+ void set_destination(Register reg) { destination_ = reg; }
+
+ FunctionLiteral* function() { return info_->function(); }
+ Scope* scope() { return info_->scope(); }
+
+ // Platform-specific fixed registers, all guaranteed distinct.
+ Register accumulator0();
+ Register accumulator1();
+ Register scratch0();
+ Register scratch1();
+ Register receiver_reg();
+ Register context_reg();
+
+ Register other_accumulator(Register reg) {
+ ASSERT(reg.is(accumulator0()) || reg.is(accumulator1()));
+ return (reg.is(accumulator0())) ? accumulator1() : accumulator0();
+ }
+
+ // Flags are true if the respective register is statically known to hold a
+ // smi. We do not track every register, only the accumulator registers.
+ bool is_smi(Register reg) {
+ ASSERT(!reg.is(no_reg));
+ return (smi_bits_ & reg.bit()) != 0;
+ }
+ void set_as_smi(Register reg) {
+ ASSERT(!reg.is(no_reg));
+ smi_bits_ = smi_bits_ | reg.bit();
+ }
+ void clear_as_smi(Register reg) {
+ ASSERT(!reg.is(no_reg));
+ smi_bits_ = smi_bits_ & ~reg.bit();
+ }
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- // Emit code to load the receiver from the stack into a given register.
- void EmitLoadReceiver(Register reg);
+ // Emit code to load the receiver from the stack into receiver_reg.
+ void EmitLoadReceiver();
- // Emit code to check that the receiver has the same map as the
- // compile-time receiver. Receiver is expected in {ia32-edx, x64-rdx,
- // arm-r1}. Emit a branch to the (single) bailout label if check fails.
- void EmitReceiverMapCheck();
-
- // Emit code to load a global variable value into {is32-eax, x64-rax,
- // arm-r0}. Register {ia32-edx, x64-rdx, arm-r1} is preserved if it is
- // holding the receiver and {is32-ecx, x64-rcx, arm-r2} is always
- // clobbered.
- void EmitGlobalVariableLoad(Handle<String> name);
+ // Emit code to load a global variable directly from a global property
+ // cell into the destination register.
+ void EmitGlobalVariableLoad(Handle<Object> cell);
// Emit a store to an own property of this. The stored value is expected
- // in {ia32-eax, x64-rax, arm-r0} and the receiver in {is32-edx, x64-rdx,
- // arm-r1}. Both are preserve.
+ // in accumulator0 and the receiver in receiver_reg. The receiver
+ // register is preserved and the result (the stored value) is left in the
+ // destination register.
void EmitThisPropertyStore(Handle<String> name);
- MacroAssembler* masm_;
- Handle<Script> script_;
- bool is_eval_;
+ // Emit a load from an own property of this. The receiver is expected in
+ // receiver_reg. The receiver register is preserved and the result is
+ // left in the destination register.
+ void EmitThisPropertyLoad(Handle<String> name);
- FunctionLiteral* function_;
- CompilationInfo* info_;
+ // Emit a bitwise or operation. The left operand is in accumulator1 and
+ // the right is in accumulator0. The result should be left in the
+ // destination register.
+ void EmitBitOr();
+ MacroAssembler* masm_;
+ CompilationInfo* info_;
Label bailout_;
+ Register destination_;
+ uint32_t smi_bits_;
DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
};
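
The smi_bits_ field above tracks, one bit per register code, whether an accumulator is statically known to hold a smi. A self-contained sketch of the same bitmask bookkeeping (SmiTracker is a hypothetical stand-in):

#include <cassert>
#include <cstdint>

class SmiTracker {
 public:
  bool is_smi(int reg) const { return (bits_ & (1u << reg)) != 0; }
  void set_as_smi(int reg) { bits_ |= (1u << reg); }
  void clear_as_smi(int reg) { bits_ &= ~(1u << reg); }

 private:
  uint32_t bits_ = 0;  // one flag bit per register code
};

int main() {
  SmiTracker t;
  t.set_as_smi(0);    // accumulator0 proven to hold a smi
  assert(t.is_smi(0));
  t.clear_as_smi(0);  // e.g. after a heap-number result
  assert(!t.is_smi(0));
  return 0;
}
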
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index b57f2cb62..6e22d5bbc 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -116,6 +116,8 @@ DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
DEFINE_bool(enable_vfp3, true,
"enable use of VFP3 instructions if available (ARM only)")
+DEFINE_bool(enable_armv7, true,
+ "enable use of ARMv7 instructions if available (ARM only)")
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
@@ -218,7 +220,7 @@ DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
// rewriter.cc
DEFINE_bool(optimize_ast, true, "optimize the ast")
-// simulator-arm.cc
+// simulator-arm.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "trace simulator execution")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
diff --git a/deps/v8/src/frame-element.cc b/deps/v8/src/frame-element.cc
index e6bc2eafd..14555596a 100644
--- a/deps/v8/src/frame-element.cc
+++ b/deps/v8/src/frame-element.cc
@@ -32,10 +32,6 @@
namespace v8 {
namespace internal {
-// -------------------------------------------------------------------------
-// FrameElement implementation.
-
-
FrameElement::ZoneObjectList* FrameElement::ConstantList() {
static ZoneObjectList list(10);
return &list;
diff --git a/deps/v8/src/frame-element.h b/deps/v8/src/frame-element.h
index ccdecf1d6..3ae6d303f 100644
--- a/deps/v8/src/frame-element.h
+++ b/deps/v8/src/frame-element.h
@@ -28,7 +28,8 @@
#ifndef V8_FRAME_ELEMENT_H_
#define V8_FRAME_ELEMENT_H_
-#include "register-allocator-inl.h"
+#include "number-info.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
@@ -52,11 +53,28 @@ class FrameElement BASE_EMBEDDED {
SYNCED
};
+ inline NumberInfo::Type number_info() {
+ // Copied elements do not have number info. Instead
+ // we have to inspect their backing element in the frame.
+ ASSERT(!is_copy());
+ if (!is_constant()) return NumberInfoField::decode(value_);
+ Handle<Object> value = handle();
+ if (value->IsSmi()) return NumberInfo::kSmi;
+ if (value->IsHeapNumber()) return NumberInfo::kHeapNumber;
+ return NumberInfo::kUnknown;
+ }
+
+ inline void set_number_info(NumberInfo::Type info) {
+ value_ = value_ & ~NumberInfoField::mask();
+ value_ = value_ | NumberInfoField::encode(info);
+ }
+
// The default constructor creates an invalid frame element.
FrameElement() {
value_ = TypeField::encode(INVALID)
| CopiedField::encode(false)
| SyncedField::encode(false)
+ | NumberInfoField::encode(NumberInfo::kUninitialized)
| DataField::encode(0);
}
@@ -67,15 +85,16 @@ class FrameElement BASE_EMBEDDED {
}
// Factory function to construct an in-memory frame element.
- static FrameElement MemoryElement() {
- FrameElement result(MEMORY, no_reg, SYNCED);
+ static FrameElement MemoryElement(NumberInfo::Type info) {
+ FrameElement result(MEMORY, no_reg, SYNCED, info);
return result;
}
// Factory function to construct an in-register frame element.
static FrameElement RegisterElement(Register reg,
- SyncFlag is_synced) {
- return FrameElement(REGISTER, reg, is_synced);
+ SyncFlag is_synced,
+ NumberInfo::Type info) {
+ return FrameElement(REGISTER, reg, is_synced, info);
}
// Factory function to construct a frame element whose value is known at
@@ -185,10 +204,14 @@ class FrameElement BASE_EMBEDDED {
};
// Used to construct memory and register elements.
- FrameElement(Type type, Register reg, SyncFlag is_synced) {
+ FrameElement(Type type,
+ Register reg,
+ SyncFlag is_synced,
+ NumberInfo::Type info) {
value_ = TypeField::encode(type)
| CopiedField::encode(false)
| SyncedField::encode(is_synced != NOT_SYNCED)
+ | NumberInfoField::encode(info)
| DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
}
@@ -197,6 +220,7 @@ class FrameElement BASE_EMBEDDED {
value_ = TypeField::encode(CONSTANT)
| CopiedField::encode(false)
| SyncedField::encode(is_synced != NOT_SYNCED)
+ | NumberInfoField::encode(NumberInfo::kUninitialized)
| DataField::encode(ConstantList()->length());
ConstantList()->Add(value);
}
@@ -223,9 +247,10 @@ class FrameElement BASE_EMBEDDED {
uint32_t value_;
class TypeField: public BitField<Type, 0, 3> {};
- class CopiedField: public BitField<uint32_t, 3, 1> {};
- class SyncedField: public BitField<uint32_t, 4, 1> {};
- class DataField: public BitField<uint32_t, 5, 32 - 6> {};
+ class CopiedField: public BitField<bool, 3, 1> {};
+ class SyncedField: public BitField<bool, 4, 1> {};
+ class NumberInfoField: public BitField<NumberInfo::Type, 5, 3> {};
+ class DataField: public BitField<uint32_t, 8, 32 - 9> {};
friend class VirtualFrame;
};
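
FrameElement packs its type, copied/synced flags, the new 3-bit number info, and the payload into one uint32_t via BitField<T, shift, size> helpers; the hunk above shifts DataField from bit 5 to bit 8 to make room. A self-contained sketch of the BitField technique, simplified from (and not identical to) V8's class:

#include <cassert>
#include <cstdint>

// Encode/decode a value of type T occupying `size` bits at `shift`.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> shift);
  }
};

enum Type { INVALID, MEMORY, REGISTER, CONSTANT };
typedef BitField<Type, 0, 3> TypeField;
typedef BitField<bool, 3, 1> CopiedField;
typedef BitField<uint32_t, 8, 32 - 9> DataField;  // shifted up for NumberInfo

int main() {
  uint32_t word = TypeField::encode(REGISTER)
                | CopiedField::encode(true)
                | DataField::encode(5);
  assert(TypeField::decode(word) == REGISTER);
  assert(CopiedField::decode(word));
  assert(DataField::decode(word) == 5u);
  return 0;
}
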
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index c5f2f1a33..722185132 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -36,6 +36,8 @@
#include "x64/frames-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/frames-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/frames-mips.h"
#else
#error Unsupported target architecture.
#endif
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index e56a2c83e..5d88265c7 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -408,12 +408,7 @@ Object*& ExitFrame::code_slot() const {
Code* ExitFrame::code() const {
- Object* code = code_slot();
- if (code->IsSmi()) {
- return Heap::debugger_statement_code();
- } else {
- return Code::cast(code);
- }
+ return Code::cast(code_slot());
}
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index 01714cbb1..63714392c 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -32,6 +32,7 @@
#include "full-codegen.h"
#include "stub-cache.h"
#include "debug.h"
+#include "liveedit.h"
namespace v8 {
namespace internal {
@@ -439,24 +440,27 @@ void FullCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
#define __ ACCESS_MASM(masm())
-Handle<Code> FullCodeGenerator::MakeCode(FunctionLiteral* fun,
- Handle<Script> script,
- bool is_eval) {
+Handle<Code> FullCodeGenerator::MakeCode(CompilationInfo* info) {
+ Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
int len = String::cast(script->source())->length();
Counters::total_full_codegen_source_size.Increment(len);
}
- CodeGenerator::MakeCodePrologue(fun);
+ CodeGenerator::MakeCodePrologue(info);
const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(NULL, kInitialBufferSize);
- FullCodeGenerator cgen(&masm, script, is_eval);
- cgen.Generate(fun, PRIMARY);
+ LiveEditFunctionTracker live_edit_tracker(info->function());
+
+ FullCodeGenerator cgen(&masm);
+ cgen.Generate(info, PRIMARY);
if (cgen.HasStackOverflow()) {
ASSERT(!Top::has_pending_exception());
return Handle<Code>::null();
}
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
- return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script);
+ Handle<Code> result = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
+ live_edit_tracker.RecordFunctionCode(result);
+ return result;
}
@@ -467,7 +471,7 @@ int FullCodeGenerator::SlotOffset(Slot* slot) {
// Adjust by a (parameter or local) base offset.
switch (slot->type()) {
case Slot::PARAMETER:
- offset += (function_->scope()->num_parameters() + 1) * kPointerSize;
+ offset += (scope()->num_parameters() + 1) * kPointerSize;
break;
case Slot::LOCAL:
offset += JavaScriptFrameConstants::kLocal0Offset;
@@ -520,7 +524,7 @@ void FullCodeGenerator::VisitDeclarations(
}
} else {
Handle<JSFunction> function =
- Compiler::BuildBoilerplate(decl->fun(), script_, this);
+ Compiler::BuildBoilerplate(decl->fun(), script(), this);
// Check for stack-overflow exception.
if (HasStackOverflow()) return;
array->set(j++, *function);
@@ -987,8 +991,7 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
Comment cmnt(masm_, "[ DebuggerStatement");
SetStatementPosition(stmt);
- DebuggerStatementStub ces;
- __ CallStub(&ces);
+ __ DebugBreak();
// Ignore the return value.
#endif
}
@@ -1033,86 +1036,6 @@ void FullCodeGenerator::VisitLiteral(Literal* expr) {
}
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- ASSERT(expr->op() != Token::INIT_CONST);
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->target()->AsProperty();
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
- }
-
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- VisitForValue(prop->obj(), kStack);
- break;
- case KEYED_PROPERTY:
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
- break;
- }
-
- // If we have a compound assignment: Get value of LHS expression and
- // store in on top of the stack.
- if (expr->is_compound()) {
- Location saved_location = location_;
- location_ = kStack;
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
- Expression::kValue);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(prop);
- __ push(result_register());
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(prop);
- __ push(result_register());
- break;
- }
- location_ = saved_location;
- }
-
- // Evaluate RHS expression.
- Expression* rhs = expr->value();
- VisitForValue(rhs, kAccumulator);
-
- // If we have a compound assignment: Apply operator.
- if (expr->is_compound()) {
- Location saved_location = location_;
- location_ = kAccumulator;
- EmitBinaryOp(expr->binary_op(), Expression::kValue);
- location_ = saved_location;
- }
-
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
-
- // Store the value.
- switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- context_);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyAssignment(expr);
- break;
- }
-}
-
-
void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
// Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable.
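
The generic VisitAssignment is deleted here because assignment emission moves into the architecture-specific files (note full-codegen-ia32.cc and codegen-ia32.cc in the file list); the ia32 hunks later in this patch add EmitSlotAssignment, EmitNamedPropertyAssignment and EmitKeyedPropertyAssignment. A hedged sketch of the per-target dispatch that replaces the old LhsKind switch:

    // Assumption: the classification mirrors the deleted enum
    // { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }.
    void CodeGenerator::VisitAssignment(Assignment* node) {
      Variable* var = node->target()->AsVariableProxy()->AsVariable();
      Property* prop = node->target()->AsProperty();
      if (var != NULL && !var->is_global()) {
        EmitSlotAssignment(node);           // parameter/local/context slot
      } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
                 (var != NULL && var->is_global())) {
        EmitNamedPropertyAssignment(node);  // obj.name = v, or a global
      } else {
        EmitKeyedPropertyAssignment(node);  // obj[key] = v
      }
    }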
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 6688ff7c5..96d0f3e7e 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -68,11 +68,9 @@ class FullCodeGenerator: public AstVisitor {
SECONDARY
};
- FullCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
+ explicit FullCodeGenerator(MacroAssembler* masm)
: masm_(masm),
- script_(script),
- is_eval_(is_eval),
- function_(NULL),
+ info_(NULL),
nesting_stack_(NULL),
loop_depth_(0),
location_(kStack),
@@ -80,11 +78,9 @@ class FullCodeGenerator: public AstVisitor {
false_label_(NULL) {
}
- static Handle<Code> MakeCode(FunctionLiteral* fun,
- Handle<Script> script,
- bool is_eval);
+ static Handle<Code> MakeCode(CompilationInfo* info);
- void Generate(FunctionLiteral* fun, Mode mode);
+ void Generate(CompilationInfo* info, Mode mode);
private:
class Breakable;
@@ -408,6 +404,12 @@ class FullCodeGenerator: public AstVisitor {
}
MacroAssembler* masm() { return masm_; }
+
+ Handle<Script> script() { return info_->script(); }
+ bool is_eval() { return info_->is_eval(); }
+ FunctionLiteral* function() { return info_->function(); }
+ Scope* scope() { return info_->scope(); }
+
static Register result_register();
static Register context_register();
@@ -427,10 +429,7 @@ class FullCodeGenerator: public AstVisitor {
void EmitLogicalOperation(BinaryOperation* expr);
MacroAssembler* masm_;
- Handle<Script> script_;
- bool is_eval_;
-
- FunctionLiteral* function_;
+ CompilationInfo* info_;
Label return_label_;
NestedStatement* nesting_stack_;
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 39f6bcb26..8f6f47c65 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -46,6 +46,9 @@ namespace internal {
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
+#elif defined(_MIPS_ARCH_MIPS32R2)
+#define V8_HOST_ARCH_MIPS 1
+#define V8_HOST_ARCH_32_BIT 1
#else
#error Your host architecture was not detected as supported by v8
#endif
@@ -53,6 +56,7 @@ namespace internal {
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
#define V8_TARGET_CAN_READ_UNALIGNED 1
#elif V8_TARGET_ARCH_ARM
+#elif V8_TARGET_ARCH_MIPS
#else
#error Your target architecture is not supported by v8
#endif
@@ -608,6 +612,7 @@ enum CpuFeature { SSE3 = 32, // x86
RDTSC = 4, // x86
CPUID = 10, // x86
VFP3 = 1, // ARM
+ ARMv7 = 2, // ARM
SAHF = 0}; // x86
} } // namespace v8::internal
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index c66056ebb..971c9164a 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -300,6 +300,12 @@ Handle<Object> GetPrototype(Handle<Object> obj) {
}
+Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) {
+ const bool skip_hidden_prototypes = false;
+ CALL_HEAP_FUNCTION(obj->SetPrototype(*value, skip_hidden_prototypes), Object);
+}
+
+
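The added SetPrototype is a thin handle-safe wrapper: CALL_HEAP_FUNCTION is V8's idiom for invoking a raw Object*-returning heap operation and converting the result to a Handle, retrying after a GC when the allocation failed. Roughly (a sketch; the real macro also handles the out-of-memory case):

    // Object* raw = obj->SetPrototype(*value, skip_hidden_prototypes);
    // if (raw->IsFailure()) {
    //   ... collect garbage and retry ...
    // }
    // return Handle<Object>(raw);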
Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
bool create_if_needed) {
Object* holder = obj->BypassGlobalProxy();
@@ -477,25 +483,25 @@ void InitScriptLineEnds(Handle<Script> script) {
int GetScriptLineNumber(Handle<Script> script, int code_pos) {
InitScriptLineEnds(script);
AssertNoAllocation no_allocation;
- FixedArray* line_ends_array =
- FixedArray::cast(script->line_ends());
+ FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
const int line_ends_len = line_ends_array->length();
- int line = -1;
- if (line_ends_len > 0 &&
- code_pos <= (Smi::cast(line_ends_array->get(0)))->value()) {
- line = 0;
- } else {
- for (int i = 1; i < line_ends_len; ++i) {
- if ((Smi::cast(line_ends_array->get(i - 1)))->value() < code_pos &&
- code_pos <= (Smi::cast(line_ends_array->get(i)))->value()) {
- line = i;
- break;
- }
+ if (!line_ends_len)
+ return -1;
+
+ if ((Smi::cast(line_ends_array->get(0)))->value() >= code_pos)
+ return script->line_offset()->value();
+
+ int left = 0;
+ int right = line_ends_len;
+ while (int half = (right - left) / 2) {
+ if ((Smi::cast(line_ends_array->get(left + half)))->value() > code_pos) {
+ right -= half;
+ } else {
+ left += half;
}
}
-
- return line != -1 ? line + script->line_offset()->value() : line;
+ return right + script->line_offset()->value();
}
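
The O(n) scan over line ends becomes an O(log n) binary search. A standalone model of the new lookup, assuming line_ends holds the source position of the last character of each line in ascending order:

    #include <vector>

    int LineFromPosition(const std::vector<int>& line_ends, int code_pos) {
      if (line_ends.empty()) return -1;
      if (line_ends[0] >= code_pos) return 0;
      // Invariant: the answer index stays in (left, right]; halving the
      // interval preserves this until right - left == 1.
      int left = 0;
      int right = static_cast<int>(line_ends.size());
      while (int half = (right - left) / 2) {
        if (line_ends[left + half] > code_pos) {
          right -= half;
        } else {
          left += half;
        }
      }
      return right;  // the real function adds script->line_offset()
    }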
@@ -686,7 +692,7 @@ static bool CompileLazyHelper(CompilationInfo* info,
bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag) {
- CompilationInfo info(shared, Handle<Object>::null(), 0);
+ CompilationInfo info(shared);
return CompileLazyHelper(&info, flag);
}
@@ -694,8 +700,7 @@ bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
bool CompileLazy(Handle<JSFunction> function,
Handle<Object> receiver,
ClearExceptionFlag flag) {
- Handle<SharedFunctionInfo> shared(function->shared());
- CompilationInfo info(shared, receiver, 0);
+ CompilationInfo info(function, 0, receiver);
bool result = CompileLazyHelper(&info, flag);
LOG(FunctionCreateEvent(*function));
return result;
@@ -705,8 +710,7 @@ bool CompileLazy(Handle<JSFunction> function,
bool CompileLazyInLoop(Handle<JSFunction> function,
Handle<Object> receiver,
ClearExceptionFlag flag) {
- Handle<SharedFunctionInfo> shared(function->shared());
- CompilationInfo info(shared, receiver, 1);
+ CompilationInfo info(function, 1, receiver);
bool result = CompileLazyHelper(&info, flag);
LOG(FunctionCreateEvent(*function));
return result;
@@ -766,7 +770,8 @@ void LoadLazy(Handle<JSObject> obj, bool* pending_exception) {
Handle<String> script_name = Factory::NewStringFromAscii(name);
bool allow_natives_syntax = FLAG_allow_natives_syntax;
FLAG_allow_natives_syntax = true;
- boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL);
+ boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL,
+ Handle<String>::null());
FLAG_allow_natives_syntax = allow_natives_syntax;
// If the compilation failed (possibly due to stack overflows), we
// should never enter the result in the natives cache. Instead we
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 04f087bd8..90e51fa56 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -240,6 +240,8 @@ Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
Handle<Object> GetPrototype(Handle<Object> obj);
+Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
+
// Return the object's hidden properties object. If the object has no hidden
// properties and create_if_needed is true, then a new hidden property object
// will be allocated. Otherwise the Heap::undefined_value is returned.
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 5f4d81501..fc4e666b9 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -1498,12 +1498,6 @@ void Heap::CreateRegExpCEntryStub() {
#endif
-void Heap::CreateCEntryDebugBreakStub() {
- DebuggerStatementStub stub;
- set_debugger_statement_code(*stub.GetCode());
-}
-
-
void Heap::CreateJSEntryStub() {
JSEntryStub stub;
set_js_entry_code(*stub.GetCode());
@@ -1531,7 +1525,6 @@ void Heap::CreateFixedStubs() {
// }
// To workaround the problem, make separate functions without inlining.
Heap::CreateCEntryStub();
- Heap::CreateCEntryDebugBreakStub();
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
@@ -1774,6 +1767,7 @@ Object* Heap::SmiOrNumberFromDouble(double value,
Object* Heap::NumberToString(Object* number) {
+ Counters::number_to_string_runtime.Increment();
Object* cached = GetNumberStringCache(number);
if (cached != undefined_value()) {
return cached;
@@ -2389,12 +2383,13 @@ Object* Heap::AllocateInitialMap(JSFunction* fun) {
map->set_unused_property_fields(in_object_properties);
map->set_prototype(prototype);
- // If the function has only simple this property assignments add field
- // descriptors for these to the initial map as the object cannot be
- // constructed without having these properties.
+ // If the function has only simple this property assignments add
+ // field descriptors for these to the initial map as the object
+ // cannot be constructed without having these properties. Guard by
+ // the inline_new flag so we only change the map if we generate a
+ // specialized construct stub.
ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
- if (fun->shared()->has_only_simple_this_property_assignments() &&
- fun->shared()->this_property_assignments_count() > 0) {
+ if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
int count = fun->shared()->this_property_assignments_count();
if (count > in_object_properties) {
count = in_object_properties;
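
The two ad-hoc conditions (only simple this-property assignments, and at least one of them) are folded into SharedFunctionInfo::CanGenerateInlineConstructor, which per the new comment also consults the inline_new flag. A plausible shape for the predicate (an assumption; the real definition lives elsewhere in this patch and may inspect the prototype as well):

    bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
      return FLAG_inline_new &&
             has_only_simple_this_property_assignments() &&
             this_property_assignments_count() > 0;
    }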
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index cbf0b73ed..22ab875bb 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -101,7 +101,6 @@ namespace internal {
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
V(Code, c_entry_code, CEntryCode) \
- V(Code, debugger_statement_code, DebuggerStatementCode) \
V(FixedArray, number_string_cache, NumberStringCache) \
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
@@ -1046,7 +1045,6 @@ class Heap : public AllStatic {
// These four Create*EntryStub functions are here because of a gcc-4.4 bug
// that assigns wrong vtable entries.
static void CreateCEntryStub();
- static void CreateCEntryDebugBreakStub();
static void CreateJSEntryStub();
static void CreateJSConstructEntryStub();
static void CreateRegExpCEntryStub();
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index dc017ae32..ffcefe0b5 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -267,7 +267,7 @@ bool Operand::is_reg(Register reg) const {
}
// -----------------------------------------------------------------------------
-// Implementation of Assembler
+// Implementation of Assembler.
// Emit a single byte. Must always be inlined.
#define EMIT(x) \
@@ -278,12 +278,12 @@ bool Operand::is_reg(Register reg) const {
static void InitCoverageLog();
#endif
-// spare_buffer_
+// Spare buffer.
byte* Assembler::spare_buffer_ = NULL;
Assembler::Assembler(void* buffer, int buffer_size) {
if (buffer == NULL) {
- // do our own buffer management
+ // Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
@@ -300,7 +300,7 @@ Assembler::Assembler(void* buffer, int buffer_size) {
buffer_size_ = buffer_size;
own_buffer_ = true;
} else {
- // use externally provided buffer instead
+ // Use externally provided buffer instead.
ASSERT(buffer_size > 0);
buffer_ = static_cast<byte*>(buffer);
buffer_size_ = buffer_size;
@@ -316,7 +316,7 @@ Assembler::Assembler(void* buffer, int buffer_size) {
}
#endif
- // setup buffer pointers
+ // Setup buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -344,11 +344,10 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
- // finalize code
- // (at this point overflow() may be true, but the gap ensures that
- // we are still not overlapping instructions and relocation info)
- ASSERT(pc_ <= reloc_info_writer.pos()); // no overlap
- // setup desc
+ // Finalize code (at this point overflow() may be true, but the gap ensures
+ // that we are still not overlapping instructions and relocation info).
+ ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
+ // Setup code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
@@ -435,7 +434,7 @@ void Assembler::push(const Operand& src) {
void Assembler::pop(Register dst) {
ASSERT(reloc_info_writer.last_pc() != NULL);
if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) {
- // (last_pc_ != NULL) is rolled into the above check
+ // (last_pc_ != NULL) is rolled into the above check.
// If a last_pc_ is set, we need to make sure that there has not been any
// relocation information generated between the last instruction and this
// pop instruction.
@@ -461,7 +460,7 @@ void Assembler::pop(Register dst) {
return;
} else if (instr == 0xff) { // push of an operand, convert to a move
byte op1 = last_pc_[1];
- // Check if the operation is really a push
+ // Check if the operation is really a push.
if ((op1 & 0x38) == (6 << 3)) {
op1 = (op1 & ~0x38) | static_cast<byte>(dst.code() << 3);
last_pc_[0] = 0x8b;
@@ -747,7 +746,7 @@ void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- // Opcode: 0f 40 + cc /r
+ // Opcode: 0f 40 + cc /r.
EMIT(0x0F);
EMIT(0x40 + cc);
emit_operand(dst, src);
@@ -765,7 +764,7 @@ void Assembler::rep_movs() {
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- if (src.is(eax) || dst.is(eax)) { // Single-byte encoding
+ if (src.is(eax) || dst.is(eax)) { // Single-byte encoding.
EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
} else {
EMIT(0x87);
@@ -1434,7 +1433,7 @@ void Assembler::bind_to(Label* L, int pos) {
if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
ASSERT(byte_at(fixup_pos - 1) == 0xE9); // jmp expected
}
- // relative address, relative to point after address
+ // Relative address, relative to point after address.
int imm32 = pos - (fixup_pos + sizeof(int32_t));
long_at_put(fixup_pos, imm32);
}
@@ -1449,7 +1448,7 @@ void Assembler::link_to(Label* L, Label* appendix) {
last_pc_ = NULL;
if (appendix->is_linked()) {
if (L->is_linked()) {
- // append appendix to L's list
+ // Append appendix to L's list.
Label p;
Label q = *L;
do {
@@ -1462,7 +1461,7 @@ void Assembler::link_to(Label* L, Label* appendix) {
disp_at_put(&p, disp);
p.Unuse(); // to avoid assertion failure in ~Label
} else {
- // L is empty, simply use appendix
+ // L is empty, simply use appendix.
*L = *appendix;
}
}
@@ -1485,11 +1484,11 @@ void Assembler::call(Label* L) {
const int long_size = 5;
int offs = L->pos() - pc_offset();
ASSERT(offs <= 0);
- // 1110 1000 #32-bit disp
+ // 1110 1000 #32-bit disp.
EMIT(0xE8);
emit(offs - long_size);
} else {
- // 1110 1000 #32-bit disp
+ // 1110 1000 #32-bit disp.
EMIT(0xE8);
emit_disp(L, Displacement::OTHER);
}
@@ -1532,16 +1531,16 @@ void Assembler::jmp(Label* L) {
int offs = L->pos() - pc_offset();
ASSERT(offs <= 0);
if (is_int8(offs - short_size)) {
- // 1110 1011 #8-bit disp
+ // 1110 1011 #8-bit disp.
EMIT(0xEB);
EMIT((offs - short_size) & 0xFF);
} else {
- // 1110 1001 #32-bit disp
+ // 1110 1001 #32-bit disp.
EMIT(0xE9);
emit(offs - long_size);
}
} else {
- // 1110 1001 #32-bit disp
+ // 1110 1001 #32-bit disp.
EMIT(0xE9);
emit_disp(L, Displacement::UNCONDITIONAL_JUMP);
}
@@ -1611,7 +1610,7 @@ void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint) {
last_pc_ = pc_;
ASSERT((0 <= cc) && (cc < 16));
if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
- // 0000 1111 1000 tttn #32-bit disp
+ // 0000 1111 1000 tttn #32-bit disp.
EMIT(0x0F);
EMIT(0x80 | cc);
emit(entry - (pc_ + sizeof(int32_t)), rmode);
@@ -1629,7 +1628,7 @@ void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
}
-// FPU instructions
+// FPU instructions.
void Assembler::fld(int i) {
EnsureSpace ensure_space(this);
@@ -2225,10 +2224,10 @@ void Assembler::WriteRecordedPositions() {
void Assembler::GrowBuffer() {
- ASSERT(overflow()); // should not call this otherwise
+ ASSERT(overflow());
if (!own_buffer_) FATAL("external code buffer is too small");
- // compute new buffer size
+ // Compute new buffer size.
CodeDesc desc; // the new buffer
if (buffer_size_ < 4*KB) {
desc.buffer_size = 4*KB;
@@ -2242,7 +2241,7 @@ void Assembler::GrowBuffer() {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
- // setup new buffer
+ // Setup new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
@@ -2253,14 +2252,14 @@ void Assembler::GrowBuffer() {
memset(desc.buffer, 0xCC, desc.buffer_size);
#endif
- // copy the data
+ // Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
memmove(desc.buffer, buffer_, desc.instr_size);
memmove(rc_delta + reloc_info_writer.pos(),
reloc_info_writer.pos(), desc.reloc_size);
- // switch buffers
+ // Switch buffers.
if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
spare_buffer_ = buffer_;
} else {
@@ -2275,7 +2274,7 @@ void Assembler::GrowBuffer() {
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
- // relocate runtime entries
+ // Relocate runtime entries.
for (RelocIterator it(desc); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode();
if (rmode == RelocInfo::RUNTIME_ENTRY) {
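
Beyond the comment cleanup, GrowBuffer's scheme is worth spelling out: instructions fill the buffer from the front and relocation info from the back, so on reallocation each half moves by its own delta (pc_delta, rc_delta) before runtime entries are re-relocated. A self-contained sketch of just the copy step (not the real Assembler API):

    #include <cstring>
    #include <cstdint>

    // Move code (front-aligned) and reloc info (back-aligned) into a
    // larger buffer; pc_delta/rc_delta then patch absolute addresses.
    uint8_t* GrowCodeBuffer(uint8_t* buffer, int old_size, int new_size,
                            int instr_size, int reloc_size) {
      uint8_t* desc = new uint8_t[new_size];
      std::memcpy(desc, buffer, instr_size);
      std::memcpy(desc + new_size - reloc_size,
                  buffer + old_size - reloc_size, reloc_size);
      delete[] buffer;
      return desc;
    }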
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 9ce073437..3d7af82ad 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -77,7 +77,7 @@ struct Register {
return 1 << code_;
}
- // (unfortunately we can't make this private in a struct)
+ // Unfortunately we can't make this private in a struct.
int code_;
};
@@ -231,7 +231,8 @@ enum ScaleFactor {
times_8 = 3,
times_int_size = times_4,
times_half_pointer_size = times_2,
- times_pointer_size = times_4
+ times_pointer_size = times_4,
+ times_twice_pointer_size = times_8
};
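
times_twice_pointer_size completes the scale set for operands of the form base + index * 2^scale + disp. The JSConstructCall fix below uses exactly this addressing, and the new scale (illustrative second line) suits tables whose entries are pairs of 4-byte words:

    // Operand(base, index, scale, disp)  =>  base + index * 2^scale + disp
    // Operand(esp, eax, times_4, kPointerSize)        // esp + argc*4 + 4
    // Operand(ebx, ecx, times_twice_pointer_size, 0)  // ebx + ecx*8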
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 2c5b1d1f5..54ef382a3 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -93,7 +93,10 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// edi: called object
// eax: number of arguments
__ bind(&non_function_call);
-
+ // CALL_NON_FUNCTION expects the non-function constructor as receiver
+ // (instead of the original receiver from the call site). The receiver is
+ // stack element argc+1.
+ __ mov(Operand(esp, eax, times_4, kPointerSize), edi);
// Set expected number of arguments to zero (not changing eax).
__ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
@@ -437,33 +440,26 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ bind(&done);
}
- // 2. Get the function to call from the stack.
- { Label done, non_function, function;
- // +1 ~ return address.
- __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize));
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &non_function, not_taken);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(equal, &function, taken);
-
- // Non-function called: Clear the function to force exception.
- __ bind(&non_function);
- __ xor_(edi, Operand(edi));
- __ jmp(&done);
-
- // Function called: Change context eagerly to get the right global object.
- __ bind(&function);
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ // 2. Get the function to call (passed as receiver) from the stack, check
+ // if it is a function.
+ Label non_function;
+ // 1 ~ return address.
+ __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
+ __ test(edi, Immediate(kSmiTagMask));
+ __ j(zero, &non_function, not_taken);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &non_function, not_taken);
- __ bind(&done);
- }
- // 3. Make sure first argument is an object; convert if necessary.
- { Label call_to_object, use_global_receiver, patch_receiver, done;
- __ mov(ebx, Operand(esp, eax, times_4, 0));
+ // 3a. Patch the first argument if necessary when calling a function.
+ Label shift_arguments;
+ { Label convert_to_object, use_global_receiver, patch_receiver;
+ // Change context eagerly in case we need the global receiver.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ __ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument.
__ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &call_to_object);
+ __ j(zero, &convert_to_object);
__ cmp(ebx, Factory::null_value());
__ j(equal, &use_global_receiver);
@@ -473,31 +469,28 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(less, &call_to_object);
+ __ j(below, &convert_to_object);
__ cmp(ecx, LAST_JS_OBJECT_TYPE);
- __ j(less_equal, &done);
+ __ j(below_equal, &shift_arguments);
- __ bind(&call_to_object);
- __ EnterInternalFrame(); // preserves eax, ebx, edi
-
- // Store the arguments count on the stack (smi tagged).
+ __ bind(&convert_to_object);
+ __ EnterInternalFrame(); // In order to preserve argument count.
__ SmiTag(eax);
__ push(eax);
- __ push(edi); // save edi across the call
__ push(ebx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ mov(ebx, eax);
- __ pop(edi); // restore edi after the call
- // Get the arguments count and untag it.
__ pop(eax);
__ SmiUntag(eax);
-
__ LeaveInternalFrame();
+ // Restore the function to edi.
+ __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
__ jmp(&patch_receiver);
- // Use the global receiver object from the called function as the receiver.
+ // Use the global receiver object from the called function as the
+ // receiver.
__ bind(&use_global_receiver);
const int kGlobalIndex =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
@@ -509,50 +502,55 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ bind(&patch_receiver);
__ mov(Operand(esp, eax, times_4, 0), ebx);
- __ bind(&done);
+ __ jmp(&shift_arguments);
}
- // 4. Check that the function really is a function.
- { Label done;
- __ test(edi, Operand(edi));
- __ j(not_zero, &done, taken);
- __ xor_(ebx, Operand(ebx));
- // CALL_NON_FUNCTION will expect to find the non-function callee on the
- // expression stack of the caller. Transfer it from receiver to the
- // caller's expression stack (and make the first argument the receiver
- // for CALL_NON_FUNCTION) by decrementing the argument count.
- __ dec(eax);
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
- __ bind(&done);
- }
-
- // 5. Shift arguments and return address one slot down on the stack
- // (overwriting the receiver).
+ // 3b. Patch the first argument when calling a non-function. The
+ // CALL_NON_FUNCTION builtin expects the non-function callee as
+ // receiver, so overwrite the first argument which will ultimately
+ // become the receiver.
+ __ bind(&non_function);
+ __ mov(Operand(esp, eax, times_4, 0), edi);
+ // Clear edi to indicate a non-function being called.
+ __ xor_(edi, Operand(edi));
+
+ // 4. Shift arguments and return address one slot down on the stack
+ // (overwriting the original receiver). Adjust argument count to make
+ // the original first argument the new receiver.
+ __ bind(&shift_arguments);
{ Label loop;
__ mov(ecx, eax);
__ bind(&loop);
__ mov(ebx, Operand(esp, ecx, times_4, 0));
__ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
__ dec(ecx);
- __ j(not_sign, &loop);
+ __ j(not_sign, &loop); // While non-negative (to copy return address).
__ pop(ebx); // Discard copy of return address.
__ dec(eax); // One fewer argument (first argument is new receiver).
}
- // 6. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing.
- { __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx,
- FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ cmp(eax, Operand(ebx));
- __ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline)));
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+ { Label function;
+ __ test(edi, Operand(edi));
+ __ j(not_zero, &function, taken);
+ __ xor_(ebx, Operand(ebx));
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+ __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
+ __ bind(&function);
}
- // 7. Jump (tail-call) to the code in register edx without checking arguments.
+ // 5b. Get the code to call from the function and check that the number of
+ // expected arguments matches what we're providing. If so, jump
+ // (tail-call) to the code in register edx without checking arguments.
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx,
+ FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ cmp(eax, Operand(ebx));
+ __ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline)));
+
ParameterCount expected(0);
__ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION);
}
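
The rewritten trampoline patches the receiver first (step 3) and only then shifts. A C model of step 4's loop, under the assumption that slot 0 holds the return address, slots 1..argc the arguments, and slot argc+1 the receiver:

    #include <stdint.h>

    // Every slot from index argc down to 0 moves up one: the receiver
    // slot is overwritten first and the return address is duplicated
    // into slot 1; the subsequent pop discards the stale copy at the
    // top, and argc shrinks by one.
    void ShiftArguments(intptr_t* esp, int* argc) {
      for (int ecx = *argc; ecx >= 0; ecx--) {  // j(not_sign) ~ ecx >= 0
        esp[ecx + 1] = esp[ecx];
      }
      *argc -= 1;  // the first argument becomes the new receiver
    }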
@@ -647,9 +645,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ mov(eax, Operand(ebp, kIndexOffset));
__ jmp(&entry);
__ bind(&loop);
- __ mov(ecx, Operand(ebp, 2 * kPointerSize)); // load arguments
- __ push(ecx);
- __ push(eax);
+ __ mov(edx, Operand(ebp, 2 * kPointerSize)); // load arguments
// Use inline caching to speed up access to arguments.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -659,8 +655,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// we have generated an inline version of the keyed load. In this
// case, we know that we are not generating a test instruction next.
- // Remove IC arguments from the stack and push the nth argument.
- __ add(Operand(esp), Immediate(2 * kPointerSize));
+ // Push the nth argument.
__ push(eax);
// Update the index on the stack and in register eax.
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index fe91903e5..7ec3ff4c5 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -103,14 +103,10 @@ CodeGenState::~CodeGenState() {
// -------------------------------------------------------------------------
// CodeGenerator implementation
-CodeGenerator::CodeGenerator(MacroAssembler* masm,
- Handle<Script> script,
- bool is_eval)
- : is_eval_(is_eval),
- script_(script),
- deferred_(8),
+CodeGenerator::CodeGenerator(MacroAssembler* masm)
+ : deferred_(8),
masm_(masm),
- scope_(NULL),
+ info_(NULL),
frame_(NULL),
allocator_(NULL),
state_(NULL),
@@ -120,23 +116,21 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm,
}
+Scope* CodeGenerator::scope() { return info_->function()->scope(); }
+
+
// Calling conventions:
// ebp: caller's frame pointer
// esp: stack pointer
// edi: called JS function
// esi: callee's context
-void CodeGenerator::Generate(FunctionLiteral* fun,
- Mode mode,
- CompilationInfo* info) {
+void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
// Record the position for debugging purposes.
- CodeForFunctionPosition(fun);
-
- ZoneList<Statement*>* body = fun->body();
+ CodeForFunctionPosition(info->function());
// Initialize state.
- ASSERT(scope_ == NULL);
- scope_ = fun->scope();
+ info_ = info;
ASSERT(allocator_ == NULL);
RegisterAllocator register_allocator(this);
allocator_ = &register_allocator;
@@ -151,7 +145,7 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
frame_->SpillAll();
__ int3();
}
@@ -177,7 +171,7 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
frame_->AllocateStackSlots();
// Allocate the local context if needed.
- int heap_slots = scope_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ allocate local context");
// Allocate local context.
@@ -207,7 +201,6 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
// 3) don't copy parameter operand code from SlotOperand!
{
Comment cmnt2(masm_, "[ copy context parameters into .context");
-
// Note that iteration order is relevant here! If we have the same
// parameter twice (e.g., function (x, y, x)), and that parameter
// needs to be copied into the context, it must be the last argument
@@ -216,15 +209,15 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
// order: such a parameter is copied repeatedly into the same
// context location and thus the last value is what is seen inside
// the function.
- for (int i = 0; i < scope_->num_parameters(); i++) {
- Variable* par = scope_->parameter(i);
+ for (int i = 0; i < scope()->num_parameters(); i++) {
+ Variable* par = scope()->parameter(i);
Slot* slot = par->slot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
// The use of SlotOperand below is safe in unspilled code
// because the slot is guaranteed to be a context slot.
//
// There are no parameters in the global scope.
- ASSERT(!scope_->is_global_scope());
+ ASSERT(!scope()->is_global_scope());
frame_->PushParameterAt(i);
Result value = frame_->Pop();
value.ToRegister();
@@ -252,9 +245,9 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
}
// Initialize ThisFunction reference if present.
- if (scope_->is_function_scope() && scope_->function() != NULL) {
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
frame_->Push(Factory::the_hole_value());
- StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
}
} else {
// When used as the secondary compiler for splitting, ebp, esi,
@@ -272,12 +265,12 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
- if (scope_->HasIllegalRedeclaration()) {
+ if (scope()->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ illegal redeclarations");
- scope_->VisitIllegalRedeclaration(this);
+ scope()->VisitIllegalRedeclaration(this);
} else {
Comment cmnt(masm_, "[ declarations");
- ProcessDeclarations(scope_->declarations());
+ ProcessDeclarations(scope()->declarations());
// Bail out if a stack-overflow exception occurred when processing
// declarations.
if (HasStackOverflow()) return;
@@ -292,7 +285,7 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
// Compile the body of the function in a vanilla state. Don't
// bother compiling all the code if the scope has an illegal
// redeclaration.
- if (!scope_->HasIllegalRedeclaration()) {
+ if (!scope()->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
bool is_builtin = Bootstrapper::IsActive();
@@ -303,14 +296,14 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
// Ignore the return value.
}
#endif
- VisitStatements(body);
+ VisitStatements(info->function()->body());
// Handle the return from the function.
if (has_valid_frame()) {
// If there is a valid frame, control flow can fall off the end of
// the body. In that case there is an implicit return statement.
ASSERT(!function_return_is_shadowed_);
- CodeForReturnPosition(fun);
+ CodeForReturnPosition(info->function());
frame_->PrepareForReturn();
Result undefined(Factory::undefined_value());
if (function_return_.is_bound()) {
@@ -353,7 +346,6 @@ void CodeGenerator::Generate(FunctionLiteral* fun,
// There is no need to delete the register allocator, it is a
// stack-allocated local.
allocator_ = NULL;
- scope_ = NULL;
}
@@ -582,7 +574,9 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
} else if (variable != NULL && variable->slot() != NULL) {
// For a variable that rewrites to a slot, we signal it is the immediate
// subexpression of a typeof.
- LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
+ Result result =
+ LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
+ frame()->Push(&result);
} else {
// Anything else can be handled normally.
Load(expr);
@@ -590,13 +584,13 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
}
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
- if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
- ASSERT(scope_->arguments_shadow() != NULL);
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
+ if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
+ ASSERT(scope()->arguments_shadow() != NULL);
// We don't want to do lazy arguments allocation for functions that
// have heap-allocated contexts, because it interferes with the
// uninitialized const tracking in the context objects.
- return (scope_->num_heap_slots() > 0)
+ return (scope()->num_heap_slots() > 0)
? EAGER_ARGUMENTS_ALLOCATION
: LAZY_ARGUMENTS_ALLOCATION;
}
@@ -616,13 +610,13 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
frame_->PushFunction();
frame_->PushReceiverSlotAddress();
- frame_->Push(Smi::FromInt(scope_->num_parameters()));
+ frame_->Push(Smi::FromInt(scope()->num_parameters()));
Result result = frame_->CallStub(&stub, 3);
frame_->Push(&result);
}
- Variable* arguments = scope_->arguments()->var();
- Variable* shadow = scope_->arguments_shadow()->var();
+ Variable* arguments = scope()->arguments()->var();
+ Variable* shadow = scope()->arguments_shadow()->var();
ASSERT(arguments != NULL && arguments->slot() != NULL);
ASSERT(shadow != NULL && shadow->slot() != NULL);
JumpTarget done;
@@ -631,8 +625,7 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
// We have to skip storing into the arguments slot if it has already
// been written to. This can happen if a function has a local
// variable named 'arguments'.
- LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
- Result probe = frame_->Pop();
+ Result probe = LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
if (probe.is_constant()) {
// We have to skip updating the arguments object if it has
// been assigned a proper value.
@@ -729,35 +722,54 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
// The value to convert should be popped from the frame.
Result value = frame_->Pop();
value.ToRegister();
- // Fast case checks.
- // 'false' => false.
- __ cmp(value.reg(), Factory::false_value());
- dest->false_target()->Branch(equal);
+ if (value.is_number()) {
+ Comment cmnt(masm_, "ONLY_NUMBER");
+ // Fast case if NumberInfo indicates only numbers.
+ if (FLAG_debug_code) {
+ __ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number.");
+ }
+ // Smi => false iff zero.
+ ASSERT(kSmiTag == 0);
+ __ test(value.reg(), Operand(value.reg()));
+ dest->false_target()->Branch(zero);
+ __ test(value.reg(), Immediate(kSmiTagMask));
+ dest->true_target()->Branch(zero);
+ __ fldz();
+ __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
+ __ FCmp();
+ value.Unuse();
+ dest->Split(not_zero);
+ } else {
+ // Fast case checks.
+ // 'false' => false.
+ __ cmp(value.reg(), Factory::false_value());
+ dest->false_target()->Branch(equal);
+
+ // 'true' => true.
+ __ cmp(value.reg(), Factory::true_value());
+ dest->true_target()->Branch(equal);
- // 'true' => true.
- __ cmp(value.reg(), Factory::true_value());
- dest->true_target()->Branch(equal);
+ // 'undefined' => false.
+ __ cmp(value.reg(), Factory::undefined_value());
+ dest->false_target()->Branch(equal);
- // 'undefined' => false.
- __ cmp(value.reg(), Factory::undefined_value());
- dest->false_target()->Branch(equal);
+ // Smi => false iff zero.
+ ASSERT(kSmiTag == 0);
+ __ test(value.reg(), Operand(value.reg()));
+ dest->false_target()->Branch(zero);
+ __ test(value.reg(), Immediate(kSmiTagMask));
+ dest->true_target()->Branch(zero);
- // Smi => false iff zero.
- ASSERT(kSmiTag == 0);
- __ test(value.reg(), Operand(value.reg()));
- dest->false_target()->Branch(zero);
- __ test(value.reg(), Immediate(kSmiTagMask));
- dest->true_target()->Branch(zero);
-
- // Call the stub for all other cases.
- frame_->Push(&value); // Undo the Pop() from above.
- ToBooleanStub stub;
- Result temp = frame_->CallStub(&stub, 1);
- // Convert the result to a condition code.
- __ test(temp.reg(), Operand(temp.reg()));
- temp.Unuse();
- dest->Split(not_equal);
+ // Call the stub for all other cases.
+ frame_->Push(&value); // Undo the Pop() from above.
+ ToBooleanStub stub;
+ Result temp = frame_->CallStub(&stub, 1);
+ // Convert the result to a condition code.
+ __ test(temp.reg(), Operand(temp.reg()));
+ temp.Unuse();
+ dest->Split(not_equal);
+ }
}
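
The new fast path is taken when the frame's NumberInfo proves the value is a number, skipping the false/true/undefined compares entirely. What it computes, in C++ terms (a model of the emitted checks, not the generated code):

    #include <cmath>

    // Smi zero is the only falsy smi; heap numbers are compared against
    // 0.0 with FCmp, whose unordered result also routes NaN to false.
    bool NumberToBoolean(bool is_smi, int smi_value, double heap_value) {
      if (is_smi) return smi_value != 0;
      return heap_value != 0.0 && !std::isnan(heap_value);
    }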
@@ -797,6 +809,10 @@ class FloatingPointHelper : public AllStatic {
static void LoadAsIntegers(MacroAssembler* masm,
bool use_sse3,
Label* operand_conversion_failure);
+ // Test if operands are smis or heap numbers and load them
+ // into xmm0 and xmm1 if they are. Operands are in edx and eax.
+ // Leaves operands unchanged.
+ static void LoadSSE2Operands(MacroAssembler* masm);
// Test if operands are numbers (smi or HeapNumber objects), and load
// them into xmm0 and xmm1 if they are. Jump to label not_numbers if
// either operand is not a number. Operands are in edx and eax.
@@ -824,12 +840,13 @@ const char* GenericBinaryOpStub::GetName() {
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "GenericBinaryOpStub_%s_%s%s_%s%s",
+ "GenericBinaryOpStub_%s_%s%s_%s%s_%s",
op_name,
overwrite_name,
(flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
args_in_registers_ ? "RegArgs" : "StackArgs",
- args_reversed_ ? "_R" : "");
+ args_reversed_ ? "_R" : "",
+ NumberInfo::ToString(operands_type_));
return name_;
}
@@ -979,27 +996,35 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
// Neither operand is known to be a string.
}
- bool left_is_smi = left.is_constant() && left.handle()->IsSmi();
- bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
- bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
- bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
+ bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
+ bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
+ bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
+ bool right_is_non_smi_constant =
+ right.is_constant() && !right.handle()->IsSmi();
- if (left_is_smi && right_is_smi) {
+ if (left_is_smi_constant && right_is_smi_constant) {
// Compute the constant result at compile time, and leave it on the frame.
int left_int = Smi::cast(*left.handle())->value();
int right_int = Smi::cast(*right.handle())->value();
if (FoldConstantSmis(op, left_int, right_int)) return;
}
+ // Get number type of left and right sub-expressions.
+ NumberInfo::Type operands_type =
+ NumberInfo::Combine(left.number_info(), right.number_info());
+
Result answer;
- if (left_is_non_smi || right_is_non_smi) {
+ if (left_is_non_smi_constant || right_is_non_smi_constant) {
// Go straight to the slow case, with no smi code.
- GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
+ GenericBinaryOpStub stub(op,
+ overwrite_mode,
+ NO_SMI_CODE_IN_STUB,
+ operands_type);
answer = stub.GenerateCall(masm_, frame_, &left, &right);
- } else if (right_is_smi) {
+ } else if (right_is_smi_constant) {
answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
type, false, overwrite_mode);
- } else if (left_is_smi) {
+ } else if (left_is_smi_constant) {
answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
type, true, overwrite_mode);
} else {
@@ -1011,10 +1036,67 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
} else {
- GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
+ GenericBinaryOpStub stub(op,
+ overwrite_mode,
+ NO_GENERIC_BINARY_FLAGS,
+ operands_type);
answer = stub.GenerateCall(masm_, frame_, &left, &right);
}
}
+
+ // Set NumberInfo of result according to the operation performed.
+ // Rely on the fact that smis have a 31 bit payload on ia32.
+ ASSERT(kSmiValueSize == 31);
+ NumberInfo::Type result_type = NumberInfo::kUnknown;
+ switch (op) {
+ case Token::COMMA:
+ result_type = right.number_info();
+ break;
+ case Token::OR:
+ case Token::AND:
+ // Result type can be either of the two input types.
+ result_type = operands_type;
+ break;
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ // Result is always a number. Smi property of inputs is preserved.
+ result_type = (operands_type == NumberInfo::kSmi)
+ ? NumberInfo::kSmi
+ : NumberInfo::kNumber;
+ break;
+ case Token::SAR:
+ // Result is a smi if we shift by a constant >= 1, otherwise a number.
+ result_type = (right.is_constant() && right.handle()->IsSmi()
+ && Smi::cast(*right.handle())->value() >= 1)
+ ? NumberInfo::kSmi
+ : NumberInfo::kNumber;
+ break;
+ case Token::SHR:
+ // Result is a smi if we shift by a constant >= 2, otherwise a number.
+ result_type = (right.is_constant() && right.handle()->IsSmi()
+ && Smi::cast(*right.handle())->value() >= 2)
+ ? NumberInfo::kSmi
+ : NumberInfo::kNumber;
+ break;
+ case Token::ADD:
+ // Result could be a string or a number. Check types of inputs.
+ result_type = NumberInfo::IsNumber(operands_type)
+ ? NumberInfo::kNumber
+ : NumberInfo::kUnknown;
+ break;
+ case Token::SHL:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ // Result is always a number.
+ result_type = NumberInfo::kNumber;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ answer.set_number_info(result_type);
frame_->Push(&answer);
}
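
The shift rules rely on the 31-bit smi payload asserted above (signed range -2^30 .. 2^30 - 1):

    // SAR by n >= 1: a 32-bit signed x >> n lies in [-2^31/2^n, 2^31/2^n),
    //                so already n == 1 fits the smi range.
    // SHR by n >= 2: the unsigned result is < 2^(32-n); n == 2 gives
    //                values < 2^30, which fit, while n == 1 can produce
    //                up to 2^31 - 1, which does not, hence the threshold.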
@@ -1856,6 +1938,39 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
break;
}
+ case Token::DIV:
+ if (!reversed && int_value == 2) {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ // Check that lowest log2(value) bits of operand are zero, and test
+ // smi tag at the same time.
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(1, kSmiTagSize);
+ __ test(operand->reg(), Immediate(3));
+ deferred->Branch(not_zero); // Branch if non-smi or odd smi.
+ __ sar(operand->reg(), 1);
+ deferred->BindExit();
+ answer = *operand;
+ } else {
+ // Cannot fall through MOD to default case, so we duplicate the
+ // default case here.
+ Result constant_operand(value);
+ if (reversed) {
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
+ overwrite_mode);
+ }
+ }
+ break;
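+ // Why test(operand, Immediate(3)) suffices: with kSmiTag == 0 and
+ // kSmiTagSize == 1, a smi v is represented as v << 1, so bit 0 is the
+ // tag and bit 1 is the lowest payload bit. Both clear means "a smi
+ // with an even value", and then one arithmetic shift divides in
+ // tagged form:
+ //
+ //   tagged(v)      == v << 1
+ //   tagged(v) >> 1 == v == tagged(v / 2)   when v is even
+ //
+ // Odd smis and non-smis branch to the DeferredInlineSmiOperation.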
// Generate inline code for mod of powers of 2 and negative powers of 2.
case Token::MOD:
if (!reversed &&
@@ -2335,6 +2450,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Load applicand.apply onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
Load(applicand);
+ frame()->Dup();
Handle<String> name = Factory::LookupAsciiSymbol("apply");
frame()->Push(name);
Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
@@ -2344,7 +2460,9 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
Load(receiver);
- LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ Result existing_args =
+ LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ frame()->Push(&existing_args);
// Emit the source position information after having loaded the
// receiver and the arguments.
@@ -2424,8 +2542,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
__ j(equal, &adapted);
// No arguments adaptor frame. Copy fixed number of arguments.
- __ mov(eax, Immediate(scope_->num_parameters()));
- for (int i = 0; i < scope_->num_parameters(); i++) {
+ __ mov(eax, Immediate(scope()->num_parameters()));
+ for (int i = 0; i < scope()->num_parameters(); i++) {
__ push(frame_->ParameterAt(i));
}
__ jmp(&invoke);
@@ -2831,7 +2949,7 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
// Leave the frame and return popping the arguments and the
// receiver.
frame_->Exit();
- masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
+ masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
DeleteFrame();
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -3914,35 +4032,32 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
// Spill everything, even constants, to the frame.
frame_->SpillAll();
- DebuggerStatementStub ces;
- frame_->CallStub(&ces, 0);
+ frame_->DebugBreak();
// Ignore the return value.
#endif
}
-void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+Result CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
ASSERT(boilerplate->IsBoilerplate());
// The inevitable call will sync frame elements to memory anyway, so
// we do it eagerly to allow us to push the arguments directly into
// place.
- frame_->SyncRange(0, frame_->element_count() - 1);
+ frame()->SyncRange(0, frame()->element_count() - 1);
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
FastNewClosureStub stub;
- frame_->EmitPush(Immediate(boilerplate));
- Result answer = frame_->CallStub(&stub, 1);
- frame_->Push(&answer);
+ frame()->EmitPush(Immediate(boilerplate));
+ return frame()->CallStub(&stub, 1);
} else {
// Call the runtime to instantiate the function boilerplate
// object.
- frame_->EmitPush(esi);
- frame_->EmitPush(Immediate(boilerplate));
- Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
- frame_->Push(&result);
+ frame()->EmitPush(esi);
+ frame()->EmitPush(Immediate(boilerplate));
+ return frame()->CallRuntime(Runtime::kNewClosure, 2);
}
}
@@ -3952,17 +4067,19 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
// Build the function boilerplate and instantiate it.
Handle<JSFunction> boilerplate =
- Compiler::BuildBoilerplate(node, script_, this);
+ Compiler::BuildBoilerplate(node, script(), this);
// Check for stack-overflow exception.
if (HasStackOverflow()) return;
- InstantiateBoilerplate(boilerplate);
+ Result result = InstantiateBoilerplate(boilerplate);
+ frame()->Push(&result);
}
void CodeGenerator::VisitFunctionBoilerplateLiteral(
FunctionBoilerplateLiteral* node) {
Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
- InstantiateBoilerplate(node->boilerplate());
+ Result result = InstantiateBoilerplate(node->boilerplate());
+ frame()->Push(&result);
}
@@ -3998,13 +4115,12 @@ void CodeGenerator::VisitConditional(Conditional* node) {
}
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+ Result result;
if (slot->type() == Slot::LOOKUP) {
ASSERT(slot->var()->is_dynamic());
-
JumpTarget slow;
JumpTarget done;
- Result value;
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
@@ -4012,14 +4128,10 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
// perform a runtime call for all variables in the scope
// containing the eval.
if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
+ result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
// If there was no control flow to slow, we can exit early.
- if (!slow.is_linked()) {
- frame_->Push(&value);
- return;
- }
-
- done.Jump(&value);
+ if (!slow.is_linked()) return result;
+ done.Jump(&result);
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
@@ -4029,21 +4141,21 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
// Allocate a fresh register to use as a temp in
// ContextSlotOperandCheckExtensions and to hold the result
// value.
- value = allocator_->Allocate();
- ASSERT(value.is_valid());
- __ mov(value.reg(),
+ result = allocator()->Allocate();
+ ASSERT(result.is_valid());
+ __ mov(result.reg(),
ContextSlotOperandCheckExtensions(potential_slot,
- value,
+ result,
&slow));
if (potential_slot->var()->mode() == Variable::CONST) {
- __ cmp(value.reg(), Factory::the_hole_value());
- done.Branch(not_equal, &value);
- __ mov(value.reg(), Factory::undefined_value());
+ __ cmp(result.reg(), Factory::the_hole_value());
+ done.Branch(not_equal, &result);
+ __ mov(result.reg(), Factory::undefined_value());
}
// There is always control flow to slow from
// ContextSlotOperandCheckExtensions so we have to jump around
// it.
- done.Jump(&value);
+ done.Jump(&result);
}
}
@@ -4051,18 +4163,18 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
// A runtime call is inevitable. We eagerly sync frame elements
// to memory so that we can push the arguments directly into place
// on top of the frame.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(esi);
- frame_->EmitPush(Immediate(slot->var()->name()));
+ frame()->SyncRange(0, frame()->element_count() - 1);
+ frame()->EmitPush(esi);
+ frame()->EmitPush(Immediate(slot->var()->name()));
if (typeof_state == INSIDE_TYPEOF) {
- value =
- frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ result =
+ frame()->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
} else {
- value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+ result = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
}
- done.Bind(&value);
- frame_->Push(&value);
+ done.Bind(&result);
+ return result;
} else if (slot->var()->mode() == Variable::CONST) {
// Const slots may contain 'the hole' value (the constant hasn't been
@@ -4073,19 +4185,21 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
// potentially unsafe direct-frame access of SlotOperand.
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Load const");
- JumpTarget exit;
+ Label exit;
__ mov(ecx, SlotOperand(slot, ecx));
__ cmp(ecx, Factory::the_hole_value());
- exit.Branch(not_equal);
+ __ j(not_equal, &exit);
__ mov(ecx, Factory::undefined_value());
- exit.Bind();
- frame_->EmitPush(ecx);
+ __ bind(&exit);
+ return Result(ecx);
} else if (slot->type() == Slot::PARAMETER) {
- frame_->PushParameterAt(slot->index());
+ frame()->PushParameterAt(slot->index());
+ return frame()->Pop();
} else if (slot->type() == Slot::LOCAL) {
- frame_->PushLocalAt(slot->index());
+ frame()->PushLocalAt(slot->index());
+ return frame()->Pop();
} else {
// The other remaining slot types (LOOKUP and GLOBAL) cannot reach
@@ -4094,49 +4208,46 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
// The use of SlotOperand below is safe for an unspilled frame
// because it will always be a context slot.
ASSERT(slot->type() == Slot::CONTEXT);
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
- frame_->Push(&temp);
+ result = allocator()->Allocate();
+ ASSERT(result.is_valid());
+ __ mov(result.reg(), SlotOperand(slot, result.reg()));
+ return result;
}
}
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
- TypeofState state) {
- LoadFromSlot(slot, state);
+Result CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+ TypeofState state) {
+ Result result = LoadFromSlot(slot, state);
// Bail out quickly if we're not using lazy arguments allocation.
- if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
+ if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return result;
// ... or if the slot isn't a non-parameter arguments slot.
- if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
- // Pop the loaded value from the stack.
- Result value = frame_->Pop();
+ if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return result;
// If the loaded value is a constant, we know if the arguments
// object has been lazily loaded yet.
- if (value.is_constant()) {
- if (value.handle()->IsTheHole()) {
- Result arguments = StoreArgumentsObject(false);
- frame_->Push(&arguments);
+ if (result.is_constant()) {
+ if (result.handle()->IsTheHole()) {
+ result.Unuse();
+ return StoreArgumentsObject(false);
} else {
- frame_->Push(&value);
+ return result;
}
- return;
}
// The loaded value is in a register. If it is the sentinel that
// indicates that we haven't loaded the arguments object yet, we
// need to do it now.
JumpTarget exit;
- __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
- frame_->Push(&value);
- exit.Branch(not_equal);
- Result arguments = StoreArgumentsObject(false);
- frame_->SetElementAt(0, &arguments);
- exit.Bind();
+ __ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
+ exit.Branch(not_equal, &result);
+
+ result.Unuse();
+ result = StoreArgumentsObject(false);
+ exit.Bind(&result);
+ return result;
}
@@ -4206,8 +4317,6 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
// property case was inlined. Ensure that there is not a test eax
// instruction here.
__ nop();
- // Discard the global object. The result is in answer.
- frame_->Drop();
return answer;
}
@@ -4312,7 +4421,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
void CodeGenerator::VisitSlot(Slot* node) {
Comment cmnt(masm_, "[ Slot");
- LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
+ Result result = LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
+ frame()->Push(&result);
}
@@ -4607,106 +4717,214 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
}
+void CodeGenerator::EmitSlotAssignment(Assignment* node) {
+#ifdef DEBUG
+ int original_height = frame()->height();
+#endif
+ Comment cmnt(masm(), "[ Variable Assignment");
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
+ ASSERT(var != NULL);
+ Slot* slot = var->slot();
+ ASSERT(slot != NULL);
+
+ // Evaluate the right-hand side.
+ if (node->is_compound()) {
+ Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+ frame()->Push(&result);
+ Load(node->value());
+
+ bool overwrite_value =
+ (node->value()->AsBinaryOperation() != NULL &&
+ node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericBinaryOperation(node->binary_op(),
+ node->type(),
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ } else {
+ Load(node->value());
+ }
+
+ // Perform the assignment.
+ if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
+ CodeForSourcePosition(node->position());
+ StoreToSlot(slot,
+ node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
+ }
+ ASSERT(frame()->height() == original_height + 1);
+}
+
+
+void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
+#ifdef DEBUG
+ int original_height = frame()->height();
+#endif
+ Comment cmnt(masm(), "[ Named Property Assignment");
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
+ Property* prop = node->target()->AsProperty();
+ ASSERT(var == NULL || (prop == NULL && var->is_global()));
+
+ // Initialize name and evaluate the receiver subexpression.
+ Handle<String> name;
+ if (var != NULL) {
+ name = var->name();
+ LoadGlobal();
+ } else {
+ Literal* lit = prop->key()->AsLiteral();
+ ASSERT(lit != NULL);
+ name = Handle<String>::cast(lit->handle());
+ Load(prop->obj());
+ }
+
+ if (node->starts_initialization_block()) {
+ // Change to slow case in the beginning of an initialization block to
+ // avoid the quadratic behavior of repeatedly adding fast properties.
+ frame()->Dup();
+ Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ if (node->ends_initialization_block()) {
+ // Add an extra copy of the receiver to the frame, so that it can be
+ // converted back to fast case after the assignment.
+ frame()->Dup();
+ }
+
+ // Evaluate the right-hand side.
+ if (node->is_compound()) {
+ frame()->Dup();
+ Result value = EmitNamedLoad(name, var != NULL);
+ frame()->Push(&value);
+ Load(node->value());
+
+ bool overwrite_value =
+ (node->value()->AsBinaryOperation() != NULL &&
+ node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericBinaryOperation(node->binary_op(),
+ node->type(),
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ } else {
+ Load(node->value());
+ }
+
+ // Perform the assignment. It is safe to ignore constants here.
+ ASSERT(var == NULL || var->mode() != Variable::CONST);
+ ASSERT(node->op() != Token::INIT_CONST);
+ CodeForSourcePosition(node->position());
+ Result answer = EmitNamedStore(name);
+ frame()->Push(&answer);
+
+ if (node->ends_initialization_block()) {
+ // The argument to the runtime call is the extra copy of the receiver,
+ // which is below the value of the assignment. Swap the receiver and
+ // the value of the assignment expression.
+ Result result = frame()->Pop();
+ Result receiver = frame()->Pop();
+ frame()->Push(&result);
+ frame()->Push(&receiver);
+ Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
+ ASSERT(frame()->height() == original_height + 1);
+}
+
+
+void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
+#ifdef DEBUG
+ int original_height = frame()->height();
+#endif
+ Comment cmnt(masm_, "[ Keyed Property Assignment");
+ Property* prop = node->target()->AsProperty();
+ ASSERT(prop != NULL);
+
+ // Evaluate the receiver subexpression.
+ Load(prop->obj());
+
+ if (node->starts_initialization_block()) {
+ // Change to slow case in the beginning of an initialization block to
+ // avoid the quadratic behavior of repeatedly adding fast properties.
+ frame_->Dup();
+ Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ if (node->ends_initialization_block()) {
+ // Add an extra copy of the receiver to the frame, so that it can be
+ // converted back to fast case after the assignment.
+ frame_->Dup();
+ }
+
+ // Evaluate the key subexpression.
+ Load(prop->key());
+
+ // Evaluate the right-hand side.
+ if (node->is_compound()) {
+ // Duplicate receiver and key.
+ frame()->PushElementAt(1);
+ frame()->PushElementAt(1);
+ Result value = EmitKeyedLoad();
+ frame()->Push(&value);
+ Load(node->value());
+
+ bool overwrite_value =
+ (node->value()->AsBinaryOperation() != NULL &&
+ node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericBinaryOperation(node->binary_op(),
+ node->type(),
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ } else {
+ Load(node->value());
+ }
+
+ // Perform the assignment. It is safe to ignore constants here.
+ ASSERT(node->op() != Token::INIT_CONST);
+ CodeForSourcePosition(node->position());
+ Result answer = EmitKeyedStore(prop->key()->type());
+ frame()->Push(&answer);
+
+ if (node->ends_initialization_block()) {
+ // The argument to the runtime call is the extra copy of the receiver,
+ // which is below the value of the assignment. Swap the receiver and
+ // the value of the assignment expression.
+ Result result = frame()->Pop();
+ Result receiver = frame()->Pop();
+ frame()->Push(&result);
+ frame()->Push(&receiver);
+ Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
+ ASSERT(frame()->height() == original_height + 1);
+}
+
+
void CodeGenerator::VisitAssignment(Assignment* node) {
#ifdef DEBUG
- int original_height = frame_->height();
+ int original_height = frame()->height();
#endif
- Comment cmnt(masm_, "[ Assignment");
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
+ Property* prop = node->target()->AsProperty();
- { Reference target(this, node->target(), node->is_compound());
- if (target.is_illegal()) {
- // Fool the virtual frame into thinking that we left the assignment's
- // value on the frame.
- frame_->Push(Smi::FromInt(0));
- return;
- }
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
+ if (var != NULL && !var->is_global()) {
+ EmitSlotAssignment(node);
- if (node->starts_initialization_block()) {
- ASSERT(target.type() == Reference::NAMED ||
- target.type() == Reference::KEYED);
- // Change to slow case in the beginning of an initialization
- // block to avoid the quadratic behavior of repeatedly adding
- // fast properties.
+ } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
+ (var != NULL && var->is_global())) {
+ // Properties whose keys are property names and global variables are
+ // treated as named property references. We do not need to consider
+ // global 'this' because it is not a valid left-hand side.
+ EmitNamedPropertyAssignment(node);
- // The receiver is the argument to the runtime call. It is the
- // first value pushed when the reference was loaded to the
- // frame.
- frame_->PushElementAt(target.size() - 1);
- Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
- }
- if (node->ends_initialization_block()) {
- // Add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- ASSERT(target.type() == Reference::NAMED ||
- target.type() == Reference::KEYED);
- if (target.type() == Reference::NAMED) {
- frame_->Dup();
- // Dup target receiver on stack.
- } else {
- ASSERT(target.type() == Reference::KEYED);
- Result temp = frame_->Pop();
- frame_->Dup();
- frame_->Push(&temp);
- }
- }
- if (node->op() == Token::ASSIGN ||
- node->op() == Token::INIT_VAR ||
- node->op() == Token::INIT_CONST) {
- Load(node->value());
-
- } else { // Assignment is a compound assignment.
- Literal* literal = node->value()->AsLiteral();
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
- Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
- // There are two cases where the target is not read in the right hand
- // side, that are easy to test for: the right hand side is a literal,
- // or the right hand side is a different variable. TakeValue invalidates
- // the target, with an implicit promise that it will be written to again
- // before it is read.
- if (literal != NULL || (right_var != NULL && right_var != var)) {
- target.TakeValue();
- } else {
- target.GetValue();
- }
- Load(node->value());
- GenericBinaryOperation(node->binary_op(),
- node->type(),
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- }
+ } else if (prop != NULL) {
+ // Other properties (including rewritten parameters for a function that
+ // uses arguments) are keyed property assignments.
+ EmitKeyedPropertyAssignment(node);
- if (var != NULL &&
- var->mode() == Variable::CONST &&
- node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
- // Assignment ignored - leave the value on the stack.
- UnloadReference(&target);
- } else {
- CodeForSourcePosition(node->position());
- if (node->op() == Token::INIT_CONST) {
- // Dynamic constant initializations must use the function context
- // and initialize the actual constant declared. Dynamic variable
- // initializations are simply assignments and use SetValue.
- target.SetValue(CONST_INIT);
- } else {
- target.SetValue(NOT_CONST_INIT);
- }
- if (node->ends_initialization_block()) {
- ASSERT(target.type() == Reference::UNLOADED);
- // End of initialization block. Revert to fast case. The
- // argument to the runtime call is the extra copy of the receiver,
- // which is below the value of the assignment.
- // Swap the receiver and the value of the assignment expression.
- Result lhs = frame_->Pop();
- Result receiver = frame_->Pop();
- frame_->Push(&lhs);
- frame_->Push(&receiver);
- Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
- }
+ } else {
+ // Invalid left-hand side.
+ Load(node->target());
+ Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
+ // The runtime call doesn't actually return, but the code generator
+ // still emits code after it and expects a certain frame height.
+ frame()->Push(&result);
}
- ASSERT(frame_->height() == original_height + 1);
+
+ ASSERT(frame()->height() == original_height + 1);
}
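
The new VisitAssignment replaces the general Reference machinery with a three-way dispatch to specialized emitters. A hedged sketch of that classification, as a standalone C++ model (the Target enum is illustrative, not V8's AST types):

    #include <cstdio>

    enum class Target { NonGlobalVariable, GlobalVariable, NamedProperty,
                        KeyedProperty, Illegal };

    // Mirrors the dispatch above: non-global variables take the slot
    // path, named properties and globals the named path, remaining
    // properties the keyed path, and anything else throws.
    const char* Classify(Target t) {
      switch (t) {
        case Target::NonGlobalVariable: return "EmitSlotAssignment";
        case Target::GlobalVariable:    // Globals are named references.
        case Target::NamedProperty:     return "EmitNamedPropertyAssignment";
        case Target::KeyedProperty:     return "EmitKeyedPropertyAssignment";
        default:                        return "Runtime::kThrowReferenceError";
      }
    }

    int main() {
      std::printf("%s\n", Classify(Target::GlobalVariable));
      return 0;
    }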
@@ -4911,9 +5129,9 @@ void CodeGenerator::VisitCall(Call* node) {
LoadGlobalReceiver();
} else {
Load(property->obj());
+ frame()->Dup();
Load(property->key());
- Result function = EmitKeyedLoad(false);
- frame_->Drop(); // Key.
+ Result function = EmitKeyedLoad();
Result receiver = frame_->Pop();
frame_->Push(&function);
frame_->Push(&receiver);
@@ -5277,7 +5495,7 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
// ArgumentsAccessStub takes the parameter count as an input argument
// in register eax. Create a constant result for it.
- Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+ Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
// Call the shared stub to get to the arguments.length.
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
Result result = frame_->CallStub(&stub, &count);
@@ -5424,7 +5642,7 @@ void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
Load(args->at(0));
Result key = frame_->Pop();
// Explicitly create a constant result.
- Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+ Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
// Call the shared stub to get to arguments[key].
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
Result result = frame_->CallStub(&stub, &key, &count);
@@ -5535,6 +5753,17 @@ void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+
+ // Load the argument on the stack and call the stub.
+ Load(args->at(0));
+ NumberToStringStub stub;
+ Result result = frame_->CallStub(&stub, 1);
+ frame_->Push(&result);
+}
+
+
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (CheckForInlineRuntimeCall(node)) {
return;
@@ -5669,7 +5898,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
switch (op) {
case Token::SUB: {
GenericUnaryOpStub stub(Token::SUB, overwrite);
- // TODO(1222589): remove dependency of TOS being cached inside stub
Result operand = frame_->Pop();
Result answer = frame_->CallStub(&stub, &operand);
frame_->Push(&answer);
@@ -6285,7 +6513,7 @@ bool CodeGenerator::HasValidEntryRegisters() {
// Emit a LoadIC call to get the value from receiver and leave it in
-// dst. The receiver register is restored after the call.
+// dst.
class DeferredReferenceGetNamedValue: public DeferredCode {
public:
DeferredReferenceGetNamedValue(Register dst,
@@ -6308,7 +6536,9 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
void DeferredReferenceGetNamedValue::Generate() {
- __ push(receiver_);
+ if (!receiver_.is(eax)) {
+ __ mov(eax, receiver_);
+ }
__ Set(ecx, Immediate(name_));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
@@ -6325,7 +6555,6 @@ void DeferredReferenceGetNamedValue::Generate() {
__ IncrementCounter(&Counters::named_load_inline_miss, 1);
if (!dst_.is(eax)) __ mov(dst_, eax);
- __ pop(receiver_);
}
@@ -6333,9 +6562,8 @@ class DeferredReferenceGetKeyedValue: public DeferredCode {
public:
explicit DeferredReferenceGetKeyedValue(Register dst,
Register receiver,
- Register key,
- bool is_global)
- : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
+ Register key)
+ : dst_(dst), receiver_(receiver), key_(key) {
set_comment("[ DeferredReferenceGetKeyedValue");
}
@@ -6348,14 +6576,29 @@ class DeferredReferenceGetKeyedValue: public DeferredCode {
Register dst_;
Register receiver_;
Register key_;
- bool is_global_;
};
void DeferredReferenceGetKeyedValue::Generate() {
- __ push(receiver_); // First IC argument.
- __ push(key_); // Second IC argument.
-
+ if (!receiver_.is(eax)) {
+ // Register eax is available for key.
+ if (!key_.is(eax)) {
+ __ mov(eax, key_);
+ }
+ if (!receiver_.is(edx)) {
+ __ mov(edx, receiver_);
+ }
+ } else if (!key_.is(edx)) {
+ // Register edx is available for receiver.
+ if (!receiver_.is(edx)) {
+ __ mov(edx, receiver_);
+ }
+ if (!key_.is(eax)) {
+ __ mov(eax, key_);
+ }
+ } else {
+ __ xchg(edx, eax);
+ }
// Calculate the delta from the IC call instruction to the map check
// cmp instruction in the inlined version. This delta is stored in
// a test(eax, delta) instruction after the call so that we can find
@@ -6363,10 +6606,7 @@ void DeferredReferenceGetKeyedValue::Generate() {
// This means that we cannot allow test instructions after calls to
// KeyedLoadIC stubs in other places.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- RelocInfo::Mode mode = is_global_
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- __ call(ic, mode);
+ __ call(ic, RelocInfo::CODE_TARGET);
// The delta from the start of the map-compare instruction to the
// test instruction. We use masm_-> directly here instead of the __
// macro because the macro sometimes uses macro expansion to turn
@@ -6379,8 +6619,6 @@ void DeferredReferenceGetKeyedValue::Generate() {
__ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
if (!dst_.is(eax)) __ mov(dst_, eax);
- __ pop(key_);
- __ pop(receiver_);
}
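
With the push/pop calling convention gone, the deferred code must route the receiver into edx and the key into eax itself. Either input may already occupy one of those registers, so the three cases above are ordered to avoid clobbering an input before it is read, with xchg reserved for the fully crosswise case. A standalone model of that ordering logic (plain C++, not V8 code):

    #include <cstdio>

    enum Reg { eax, ebx, ecx, edx };
    static const char* Name(Reg r) {
      static const char* names[] = {"eax", "ebx", "ecx", "edx"};
      return names[r];
    }

    // Emit moves placing `key` in eax and `receiver` in edx.
    void MoveToFixed(Reg receiver, Reg key) {
      if (receiver != eax) {
        // eax is free for the key; fill it before touching edx.
        if (key != eax) std::printf("mov eax, %s\n", Name(key));
        if (receiver != edx) std::printf("mov edx, %s\n", Name(receiver));
      } else if (key != edx) {
        // edx is free for the receiver; fill it before touching eax.
        if (receiver != edx) std::printf("mov edx, %s\n", Name(receiver));
        if (key != eax) std::printf("mov eax, %s\n", Name(key));
      } else {
        // receiver is in eax and key is in edx: swap them.
        std::printf("xchg edx, eax\n");
      }
    }

    int main() {
      MoveToFixed(ecx, ebx);  // mov eax, ebx; mov edx, ecx
      MoveToFixed(eax, edx);  // xchg edx, eax
      return 0;
    }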
@@ -6432,12 +6670,91 @@ void DeferredReferenceSetKeyedValue::Generate() {
}
-Result CodeGenerator::EmitKeyedLoad(bool is_global) {
- Comment cmnt(masm_, "[ Load from keyed Property");
- // Inline array load code if inside of a loop. We do not know
- // the receiver map yet, so we initially generate the code with
- // a check against an invalid map. In the inline cache code, we
- // patch the map check if appropriate.
+Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
+#ifdef DEBUG
+ int original_height = frame()->height();
+#endif
+ Result result;
+ // Do not inline the inobject property case for loads from the global
+ // object. Also do not inline for unoptimized code. This saves time in
+ // the code generator. Unoptimized code is toplevel code or code that is
+ // not in a loop.
+ if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+ Comment cmnt(masm(), "[ Load from named Property");
+ frame()->Push(name);
+
+ RelocInfo::Mode mode = is_contextual
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ result = frame()->CallLoadIC(mode);
+ // A test eax instruction following the call signals that the inobject
+ // property case was inlined. Ensure that there is not a test eax
+ // instruction here.
+ __ nop();
+ } else {
+ // Inline the inobject property case.
+ Comment cmnt(masm(), "[ Inlined named property load");
+ Result receiver = frame()->Pop();
+ receiver.ToRegister();
+
+ result = allocator()->Allocate();
+ ASSERT(result.is_valid());
+ DeferredReferenceGetNamedValue* deferred =
+ new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
+
+ // Check that the receiver is a heap object.
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
+
+ __ bind(deferred->patch_site());
+ // This is the map check instruction that will be patched (so we can't
+ // use the double underscore macro that may insert instructions).
+ // Initially use an invalid map to force a failure.
+ masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ Immediate(Factory::null_value()));
+ // This branch is always a forwards branch so it's always a fixed size
+ // which allows the assert below to succeed and patching to work.
+ deferred->Branch(not_equal);
+
+ // The delta from the patch label to the load offset must be statically
+ // known.
+ ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
+ LoadIC::kOffsetToLoadInstruction);
+ // The initial (invalid) offset has to be large enough to force a 32-bit
+ // instruction encoding to allow patching with an arbitrary offset. Use
+ // kMaxInt (minus kHeapObjectTag).
+ int offset = kMaxInt;
+ masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
+
+ __ IncrementCounter(&Counters::named_load_inline, 1);
+ deferred->BindExit();
+ }
+ ASSERT(frame()->height() == original_height - 1);
+ return result;
+}
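+
EmitNamedLoad carries the inline fast path over from the old Reference::GetValue: emit a map compare against a deliberately invalid map and a field load with a deliberately out-of-range offset, then let the runtime patch both once the IC has seen a real map. A conceptual model of that patch-site protocol (plain C++; the types are illustrative and stand in for patched machine code):

    #include <cstdio>

    struct Map { int id; };
    struct Obj { const Map* map; int fields[4]; };

    struct InlinedNamedLoad {
      const Map* expected_map = nullptr;  // Invalid map: never matches.
      int offset = -1;                    // Bogus offset (kMaxInt in V8).

      bool Load(const Obj& obj, int* out) {
        if (obj.map != expected_map) return false;  // Miss: IC stub runs.
        *out = obj.fields[offset];                  // Patched fast path.
        return true;
      }
      void Patch(const Map* map, int real_offset) {  // Done by the IC.
        expected_map = map;
        offset = real_offset;
      }
    };

    int main() {
      Map m{1};
      Obj o{&m, {10, 20, 30, 40}};
      InlinedNamedLoad site;
      int v = 0;
      std::printf("hit=%d\n", site.Load(o, &v));          // Miss at first.
      site.Patch(&m, 2);                                  // IC patches it.
      std::printf("hit=%d v=%d\n", site.Load(o, &v), v);  // Inline hit: 30.
      return 0;
    }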
+
+
+Result CodeGenerator::EmitNamedStore(Handle<String> name) {
+#ifdef DEBUG
+ int original_height = frame()->height();
+#endif
+ frame()->Push(name);
+ Result result = frame()->CallStoreIC();
+
+ ASSERT(frame()->height() == original_height - 2);
+ return result;
+}
+
+
+Result CodeGenerator::EmitKeyedLoad() {
+#ifdef DEBUG
+ int original_height = frame()->height();
+#endif
+ Result result;
+ // Inline array load code if inside of a loop. We do not know the
+ // receiver map yet, so we initially generate the code with a check
+ // against an invalid map. In the inline cache code, we patch the map
+ // check if appropriate.
if (loop_nesting() > 0) {
Comment cmnt(masm_, "[ Inlined load from keyed Property");
@@ -6453,22 +6770,16 @@ Result CodeGenerator::EmitKeyedLoad(bool is_global) {
// Use a fresh temporary for the index and later the loaded
// value.
- Result index = allocator()->Allocate();
- ASSERT(index.is_valid());
+ result = allocator()->Allocate();
+ ASSERT(result.is_valid());
DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(index.reg(),
+ new DeferredReferenceGetKeyedValue(result.reg(),
receiver.reg(),
- key.reg(),
- is_global);
+ key.reg());
- // Check that the receiver is not a smi (only needed if this
- // is not a load from the global context) and that it has the
- // expected map.
- if (!is_global) {
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
- }
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
// Initially, use an invalid map. The map is patched in the IC
// initialization code.
@@ -6493,50 +6804,132 @@ Result CodeGenerator::EmitKeyedLoad(bool is_global) {
// Shift the key to get the actual index value and check that
// it is within bounds.
- __ mov(index.reg(), key.reg());
- __ SmiUntag(index.reg());
- __ cmp(index.reg(),
+ __ mov(result.reg(), key.reg());
+ __ SmiUntag(result.reg());
+ __ cmp(result.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
- // Load and check that the result is not the hole. We could
- // reuse the index or elements register for the value.
- //
- // TODO(206): Consider whether it makes sense to try some
- // heuristic about which register to reuse. For example, if
- // one is eax, the we can reuse that one because the value
- // coming from the deferred code will be in eax.
- Result value = index;
- __ mov(value.reg(), Operand(elements.reg(),
- index.reg(),
- times_4,
- FixedArray::kHeaderSize - kHeapObjectTag));
+ // Load and check that the result is not the hole.
+ __ mov(result.reg(), Operand(elements.reg(),
+ result.reg(),
+ times_4,
+ FixedArray::kHeaderSize - kHeapObjectTag));
elements.Unuse();
- index.Unuse();
- __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
+ __ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
deferred->BindExit();
- // Restore the receiver and key to the frame and push the
- // result on top of it.
- frame_->Push(&receiver);
- frame_->Push(&key);
- return value;
} else {
Comment cmnt(masm_, "[ Load from keyed Property");
- RelocInfo::Mode mode = is_global
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- Result answer = frame_->CallKeyedLoadIC(mode);
+ result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
// Make sure that we do not have a test instruction after the
// call. A test instruction after the call is used to
// indicate that we have generated an inline version of the
// keyed load. The explicit nop instruction is here because
// the push that follows might be peep-hole optimized away.
__ nop();
- return answer;
}
+ ASSERT(frame()->height() == original_height - 2);
+ return result;
+}
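+
EmitKeyedLoad now consumes receiver and key from the frame and returns the value, but the inlined fast path keeps the same guards: smi and map checks on the receiver, a bounds check of the untagged key against the elements length, and a hole check on the loaded element. A standalone sketch of the last two guards (illustrative types; returning false stands in for branching to the deferred KeyedLoadIC):

    #include <cstdio>
    #include <vector>

    const int kTheHole = -999;  // Stand-in for Factory::the_hole_value().

    bool InlinedKeyedLoad(const std::vector<int>& elements, int index,
                          int* value) {
      if (index < 0 || index >= static_cast<int>(elements.size())) {
        return false;  // Bounds check failed: defer to the IC.
      }
      if (elements[index] == kTheHole) {
        return false;  // Hole: defer so prototype lookup still happens.
      }
      *value = elements[index];  // Inline fast path.
      return true;
    }

    int main() {
      std::vector<int> e = {1, kTheHole, 3};
      int v = 0;
      std::printf("%d\n", InlinedKeyedLoad(e, 0, &v));  // 1 (hit)
      std::printf("%d\n", InlinedKeyedLoad(e, 1, &v));  // 0 (hole)
      std::printf("%d\n", InlinedKeyedLoad(e, 7, &v));  // 0 (out of bounds)
      return 0;
    }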
+
+
+Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
+#ifdef DEBUG
+ int original_height = frame()->height();
+#endif
+ Result result;
+ // Generate inlined version of the keyed store if the code is in a loop
+ // and the key is likely to be a smi.
+ if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
+ Comment cmnt(masm(), "[ Inlined store to keyed Property");
+
+ // Get the receiver, key and value into registers.
+ result = frame()->Pop();
+ Result key = frame()->Pop();
+ Result receiver = frame()->Pop();
+
+ Result tmp = allocator_->Allocate();
+ ASSERT(tmp.is_valid());
+
+ // Determine whether the value is a constant before putting it in a
+ // register.
+ bool value_is_constant = result.is_constant();
+
+ // Make sure that value, key and receiver are in registers.
+ result.ToRegister();
+ key.ToRegister();
+ receiver.ToRegister();
+
+ DeferredReferenceSetKeyedValue* deferred =
+ new DeferredReferenceSetKeyedValue(result.reg(),
+ key.reg(),
+ receiver.reg());
+
+ // Check that the value is a smi if it is not a constant. We can skip
+ // the write barrier for smis and constants.
+ if (!value_is_constant) {
+ __ test(result.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ }
+
+ // Check that the key is a non-negative smi.
+ __ test(key.reg(), Immediate(kSmiTagMask | 0x80000000));
+ deferred->Branch(not_zero);
+
+ // Check that the receiver is not a smi.
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
+
+ // Check that the receiver is a JSArray.
+ __ mov(tmp.reg(),
+ FieldOperand(receiver.reg(), HeapObject::kMapOffset));
+ __ movzx_b(tmp.reg(),
+ FieldOperand(tmp.reg(), Map::kInstanceTypeOffset));
+ __ cmp(tmp.reg(), JS_ARRAY_TYPE);
+ deferred->Branch(not_equal);
+
+ // Check that the key is within bounds. Both the key and the length of
+ // the JSArray are smis.
+ __ cmp(key.reg(),
+ FieldOperand(receiver.reg(), JSArray::kLengthOffset));
+ deferred->Branch(greater_equal);
+
+ // Get the elements array from the receiver and check that it is not a
+ // dictionary.
+ __ mov(tmp.reg(),
+ FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ // Bind the deferred code patch site to be able to locate the fixed
+ // array map comparison. When debugging, we patch this comparison to
+ // always fail so that we will hit the IC call in the deferred code
+ // which will allow the debugger to break for fast case stores.
+ __ bind(deferred->patch_site());
+ __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
+ Immediate(Factory::fixed_array_map()));
+ deferred->Branch(not_equal);
+
+ // Store the value.
+ __ mov(Operand(tmp.reg(),
+ key.reg(),
+ times_2,
+ FixedArray::kHeaderSize - kHeapObjectTag),
+ result.reg());
+ __ IncrementCounter(&Counters::keyed_store_inline, 1);
+
+ deferred->BindExit();
+ } else {
+ result = frame()->CallKeyedStoreIC();
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed store.
+ __ nop();
+ frame()->Drop(2);
+ }
+ ASSERT(frame()->height() == original_height - 3);
+ return result;
}
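
The guard sequence of the inlined keyed store is easy to lose in the assembler noise, so here it is as a standalone sketch (illustrative types; each early return stands in for a branch to the deferred KeyedStoreIC). Non-smi, non-constant values bail out because only smis and constants can skip the write barrier:

    #include <cstdio>
    #include <vector>

    struct ArrayModel {
      bool is_js_array;        // Receiver must be a JSArray.
      bool elements_are_fast;  // Backing store is a FixedArray, not a
                               // dictionary.
      int length;
      std::vector<int> elements;
    };

    bool InlinedKeyedStore(ArrayModel* a, int key, int value,
                           bool value_is_smi) {
      if (!value_is_smi) return false;          // Write barrier needed.
      if (key < 0) return false;                // Non-negative smi keys only.
      if (!a->is_js_array) return false;        // Wrong receiver type.
      if (key >= a->length) return false;       // In-bounds stores only.
      if (!a->elements_are_fast) return false;  // Dictionary mode: bail.
      a->elements[key] = value;                 // The inline store itself.
      return true;
    }

    int main() {
      ArrayModel a{true, true, 3, {0, 0, 0}};
      std::printf("%d\n", InlinedKeyedStore(&a, 1, 42, true));  // 1
      std::printf("%d\n", InlinedKeyedStore(&a, 5, 42, true));  // 0 (bounds)
      return 0;
    }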
@@ -6556,7 +6949,7 @@ Handle<String> Reference::GetName() {
} else {
Literal* raw_name = property->key()->AsLiteral();
ASSERT(raw_name != NULL);
- return Handle<String>(String::cast(*raw_name->handle()));
+ return Handle<String>::cast(raw_name->handle());
}
}
@@ -6578,7 +6971,10 @@ void Reference::GetValue() {
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
- cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+ Result result =
+ cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+ if (!persist_after_get_) set_unloaded();
+ cgen_->frame()->Push(&result);
break;
}
@@ -6586,87 +6982,27 @@ void Reference::GetValue() {
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
-
- // Do not inline the inobject property case for loads from the global
- // object. Also do not inline for unoptimized code. This saves time
- // in the code generator. Unoptimized code is toplevel code or code
- // that is not in a loop.
- if (is_global ||
- cgen_->scope()->is_global_scope() ||
- cgen_->loop_nesting() == 0) {
- Comment cmnt(masm, "[ Load from named Property");
- cgen_->frame()->Push(GetName());
-
- RelocInfo::Mode mode = is_global
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- Result answer = cgen_->frame()->CallLoadIC(mode);
- // A test eax instruction following the call signals that the
- // inobject property case was inlined. Ensure that there is not
- // a test eax instruction here.
- __ nop();
- cgen_->frame()->Push(&answer);
- } else {
- // Inline the inobject property case.
- Comment cmnt(masm, "[ Inlined named property load");
- Result receiver = cgen_->frame()->Pop();
- receiver.ToRegister();
-
- Result value = cgen_->allocator()->Allocate();
- ASSERT(value.is_valid());
- DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(value.reg(),
- receiver.reg(),
- GetName());
-
- // Check that the receiver is a heap object.
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
-
- __ bind(deferred->patch_site());
- // This is the map check instruction that will be patched (so we can't
- // use the double underscore macro that may insert instructions).
- // Initially use an invalid map to force a failure.
- masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(Factory::null_value()));
- // This branch is always a forwards branch so it's always a fixed
- // size which allows the assert below to succeed and patching to work.
- deferred->Branch(not_equal);
-
- // The delta from the patch label to the load offset must be
- // statically known.
- ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
- LoadIC::kOffsetToLoadInstruction);
- // The initial (invalid) offset has to be large enough to force
- // a 32-bit instruction encoding to allow patching with an
- // arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
- int offset = kMaxInt;
- masm->mov(value.reg(), FieldOperand(receiver.reg(), offset));
-
- __ IncrementCounter(&Counters::named_load_inline, 1);
- deferred->BindExit();
- cgen_->frame()->Push(&receiver);
- cgen_->frame()->Push(&value);
- }
+ if (persist_after_get_) cgen_->frame()->Dup();
+ Result result = cgen_->EmitNamedLoad(GetName(), is_global);
+ if (!persist_after_get_) set_unloaded();
+ cgen_->frame()->Push(&result);
break;
}
case KEYED: {
- Variable* var = expression_->AsVariableProxy()->AsVariable();
- bool is_global = var != NULL;
- ASSERT(!is_global || var->is_global());
- Result value = cgen_->EmitKeyedLoad(is_global);
+ if (persist_after_get_) {
+ cgen_->frame()->PushElementAt(1);
+ cgen_->frame()->PushElementAt(1);
+ }
+ Result value = cgen_->EmitKeyedLoad();
cgen_->frame()->Push(&value);
+ if (!persist_after_get_) set_unloaded();
break;
}
default:
UNREACHABLE();
}
-
- if (!persist_after_get_) {
- cgen_->UnloadReference(this);
- }
}
@@ -6716,14 +7052,13 @@ void Reference::SetValue(InitState init_state) {
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
cgen_->StoreToSlot(slot, init_state);
- cgen_->UnloadReference(this);
+ set_unloaded();
break;
}
case NAMED: {
Comment cmnt(masm, "[ Store to named Property");
- cgen_->frame()->Push(GetName());
- Result answer = cgen_->frame()->CallStoreIC();
+ Result answer = cgen_->EmitNamedStore(GetName());
cgen_->frame()->Push(&answer);
set_unloaded();
break;
@@ -6731,108 +7066,16 @@ void Reference::SetValue(InitState init_state) {
case KEYED: {
Comment cmnt(masm, "[ Store to keyed Property");
-
- // Generate inlined version of the keyed store if the code is in
- // a loop and the key is likely to be a smi.
Property* property = expression()->AsProperty();
ASSERT(property != NULL);
- StaticType* key_smi_analysis = property->key()->type();
-
- if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
- Comment cmnt(masm, "[ Inlined store to keyed Property");
-
- // Get the receiver, key and value into registers.
- Result value = cgen_->frame()->Pop();
- Result key = cgen_->frame()->Pop();
- Result receiver = cgen_->frame()->Pop();
-
- Result tmp = cgen_->allocator_->Allocate();
- ASSERT(tmp.is_valid());
-
- // Determine whether the value is a constant before putting it
- // in a register.
- bool value_is_constant = value.is_constant();
-
- // Make sure that value, key and receiver are in registers.
- value.ToRegister();
- key.ToRegister();
- receiver.ToRegister();
-
- DeferredReferenceSetKeyedValue* deferred =
- new DeferredReferenceSetKeyedValue(value.reg(),
- key.reg(),
- receiver.reg());
-
- // Check that the value is a smi if it is not a constant. We
- // can skip the write barrier for smis and constants.
- if (!value_is_constant) {
- __ test(value.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- }
-
- // Check that the key is a non-negative smi.
- __ test(key.reg(), Immediate(kSmiTagMask | 0x80000000));
- deferred->Branch(not_zero);
-
- // Check that the receiver is not a smi.
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
-
- // Check that the receiver is a JSArray.
- __ mov(tmp.reg(),
- FieldOperand(receiver.reg(), HeapObject::kMapOffset));
- __ movzx_b(tmp.reg(),
- FieldOperand(tmp.reg(), Map::kInstanceTypeOffset));
- __ cmp(tmp.reg(), JS_ARRAY_TYPE);
- deferred->Branch(not_equal);
-
- // Check that the key is within bounds. Both the key and the
- // length of the JSArray are smis.
- __ cmp(key.reg(),
- FieldOperand(receiver.reg(), JSArray::kLengthOffset));
- deferred->Branch(greater_equal);
-
- // Get the elements array from the receiver and check that it
- // is not a dictionary.
- __ mov(tmp.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- // Bind the deferred code patch site to be able to locate the
- // fixed array map comparison. When debugging, we patch this
- // comparison to always fail so that we will hit the IC call
- // in the deferred code which will allow the debugger to
- // break for fast case stores.
- __ bind(deferred->patch_site());
- __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- deferred->Branch(not_equal);
-
- // Store the value.
- __ mov(Operand(tmp.reg(),
- key.reg(),
- times_2,
- FixedArray::kHeaderSize - kHeapObjectTag),
- value.reg());
- __ IncrementCounter(&Counters::keyed_store_inline, 1);
-
- deferred->BindExit();
-
- cgen_->frame()->Push(&receiver);
- cgen_->frame()->Push(&key);
- cgen_->frame()->Push(&value);
- } else {
- Result answer = cgen_->frame()->CallKeyedStoreIC();
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed store.
- __ nop();
- cgen_->frame()->Push(&answer);
- }
- cgen_->UnloadReference(this);
+ Result answer = cgen_->EmitKeyedStore(property->key()->type());
+ cgen_->frame()->Push(&answer);
+ set_unloaded();
break;
}
- default:
+ case UNLOADED:
+ case ILLEGAL:
UNREACHABLE();
}
}
@@ -6926,6 +7169,13 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [esp + kPointerSize]: constant elements.
+ // [esp + (2 * kPointerSize)]: literal index.
+ // [esp + (3 * kPointerSize)]: literals array.
+
+ // All sizes here are multiples of kPointerSize.
int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
int size = JSArray::kSize + elements_size;
@@ -7045,6 +7295,8 @@ void GenericBinaryOpStub::GenerateCall(
}
} else if (left.is(left_arg)) {
__ mov(right_arg, right);
+ } else if (right.is(right_arg)) {
+ __ mov(left_arg, left);
} else if (left.is(right_arg)) {
if (IsOperationCommutative()) {
__ mov(left_arg, right);
@@ -7063,8 +7315,6 @@ void GenericBinaryOpStub::GenerateCall(
__ mov(right_arg, right);
__ mov(left_arg, left);
}
- } else if (right.is(right_arg)) {
- __ mov(left_arg, left);
} else {
// Order of moves is not important.
__ mov(left_arg, left);
@@ -7100,6 +7350,10 @@ void GenericBinaryOpStub::GenerateCall(
__ mov(left_arg, Immediate(right));
SetArgsReversed();
} else {
+ // For non-commutative operations, left and right_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite left before moving
+ // it to left_arg.
__ mov(left_arg, left);
__ mov(right_arg, Immediate(right));
}
@@ -7132,8 +7386,12 @@ void GenericBinaryOpStub::GenerateCall(
__ mov(right_arg, Immediate(left));
SetArgsReversed();
} else {
- __ mov(left_arg, Immediate(left));
+ // For non-commutative operations, right and left_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite right before moving
+ // it to right_arg.
__ mov(right_arg, right);
+ __ mov(left_arg, Immediate(left));
}
// Update flags to indicate that arguments are in registers.
SetArgsInRegisters();
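
Both added comments describe the same hazard: when the destination of one move is the source of the other, only one ordering preserves both values. In miniature (plain C++):

    #include <cassert>

    int main() {
      // The operand lives in r1; it must end up in r0 with an
      // immediate in r1.
      int r0 = 0, r1 = 7;
      r0 = r1;   // Read r1 before overwriting it...
      r1 = 42;   // ...then clobbering it is safe.
      assert(r0 == 7 && r1 == 42);

      // The wrong order destroys the input:
      int s0 = 0, s1 = 7;
      s1 = 42;   // Overwrites the operand...
      s0 = s1;   // ...so s0 receives 42, not 7.
      assert(s0 == 42);
      return 0;
    }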
@@ -7501,7 +7759,18 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::DIV: {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
+ if (NumberInfo::IsNumber(operands_type_)) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(edx,
+ "GenericBinaryOpStub operand not a number.");
+ __ AbortIfNotNumber(eax,
+ "GenericBinaryOpStub operand not a number.");
+ }
+ FloatingPointHelper::LoadSSE2Operands(masm);
+ } else {
+ FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
+ }
switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
@@ -7514,7 +7783,17 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
GenerateReturn(masm);
} else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+ if (NumberInfo::IsNumber(operands_type_)) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(edx,
+ "GenericBinaryOpStub operand not a number.");
+ __ AbortIfNotNumber(eax,
+ "GenericBinaryOpStub operand not a number.");
+ }
+ } else {
+ FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+ }
FloatingPointHelper::LoadFloatOperands(
masm,
ecx,
@@ -7626,7 +7905,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
switch (op_) {
case Token::ADD: {
// Test for string arguments before calling runtime.
- Label not_strings, not_string1, string1;
+ Label not_strings, not_string1, string1, string1_smi2;
Result answer;
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &not_string1);
@@ -7635,15 +7914,28 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// First argument is a string, test second.
__ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &string1);
+ __ j(zero, &string1_smi2);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &string1);
// First and second argument are strings. Jump to the string add stub.
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&stub);
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&string1_smi2);
+ // First argument is a string, second is a smi. Try to lookup the number
+ // string for the smi in the number string cache.
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm, eax, edi, ebx, ecx, true, &string1);
+
+ // Call the string add stub to make the result.
+ __ EnterInternalFrame();
+ __ push(edx); // Original first argument.
+ __ push(edi); // Number to string result for second argument.
+ __ CallStub(&string_add_stub);
+ __ LeaveInternalFrame();
+ __ ret(2 * kPointerSize);
- // Only first argument is a string.
__ bind(&string1);
__ InvokeBuiltin(
HasArgsReversed() ?
@@ -7985,6 +8277,35 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
}
+void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
+ Label load_smi_edx, load_eax, load_smi_eax, done;
+ // Load operand in edx into xmm0.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
+ __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+
+ __ bind(&load_eax);
+ // Load operand in eax into xmm1.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
+ __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_edx);
+ __ SmiUntag(edx); // Untag smi before converting to float.
+ __ cvtsi2sd(xmm0, Operand(edx));
+ __ SmiTag(edx); // Retag smi for heap number overwriting test.
+ __ jmp(&load_eax);
+
+ __ bind(&load_smi_eax);
+ __ SmiUntag(eax); // Untag smi before converting to float.
+ __ cvtsi2sd(xmm1, Operand(eax));
+ __ SmiTag(eax); // Retag smi for heap number overwriting test.
+
+ __ bind(&done);
+}
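+
This new overload can omit the type checks of the two-argument version because the stub has already established that both operands are numbers; each operand is then either an untagged-and-converted smi or a double read straight out of a heap number. A standalone model of that per-operand decision (the tagging here is the illustrative smi convention with a clear low bit, not V8's object layout):

    #include <cstdint>
    #include <cstdio>

    // tagged: low bit 0 means smi (value in the upper bits); low bit 1
    // means heap_value points at the boxed double.
    double LoadNumber(intptr_t tagged, const double* heap_value) {
      if ((tagged & 1) == 0) {
        return static_cast<double>(tagged >> 1);  // SmiUntag + cvtsi2sd.
      }
      return *heap_value;                         // movdbl from the heap.
    }

    int main() {
      intptr_t smi = 21 << 1;  // Tagged smi encoding of 21.
      double boxed = 2.5;
      std::printf("%g %g\n", LoadNumber(smi, nullptr),
                  LoadNumber(1, &boxed));  // Prints: 21 2.5
      return 0;
    }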
+
+
void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
@@ -8314,6 +8635,11 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : number of parameters
+ // esp[8] : receiver displacement
+ // esp[12] : function
+
// The displacement is used for skipping the return address and the
// frame pointer on the stack. It is the offset of the last
// parameter (if any) relative to the frame pointer.
@@ -8397,7 +8723,6 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ add(Operand(edi), Immediate(kPointerSize));
__ sub(Operand(edx), Immediate(kPointerSize));
__ dec(ecx);
- __ test(ecx, Operand(ecx));
__ j(not_zero, &loop);
// Return and remove the on-stack parameters.
@@ -8745,6 +9070,74 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ bool object_is_smi,
+ Label* not_found) {
+ // Currently only smis are cached. Check for a smi if the object is not
+ // known to be one.
+ if (!object_is_smi) {
+ ASSERT(kSmiTag == 0);
+ __ test(object, Immediate(kSmiTagMask));
+ __ j(not_zero, not_found);
+ }
+
+ // Register usage: result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
+ __ mov(number_string_cache,
+ Operand::StaticArray(scratch, times_pointer_size, roots_address));
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ __ shr(mask, 1); // Divide length by two (length is not a smi).
+ __ sub(Operand(mask), Immediate(1)); // Make mask.
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value.
+ __ mov(scratch, object);
+ __ SmiUntag(scratch);
+ __ and_(scratch, Operand(mask));
+ // Check if the entry is the smi we are looking for.
+ __ cmp(object,
+ FieldOperand(number_string_cache,
+ scratch,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ __ mov(result,
+ FieldOperand(number_string_cache,
+ scratch,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ IncrementCounter(&Counters::number_to_string_native, 1);
+}
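+
The cache itself is a FixedArray of interleaved (number, string) pairs, and the hash of a smi is simply its value masked by the capacity, so a lookup is a single indexed compare. A sketch of that logic in plain C++ (one struct per pair instead of interleaved slots; a real cache also distinguishes empty entries, which is elided here):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct CacheEntry { int number; std::string string; };

    // Capacity must be a power of two for the mask trick to work.
    bool LookupNumberStringCache(const std::vector<CacheEntry>& cache,
                                 int smi, std::string* out) {
      size_t mask = cache.size() - 1;  // length / 2 - 1 in the stub.
      size_t index = static_cast<size_t>(smi) & mask;
      if (cache[index].number != smi) {
        return false;  // Miss: fall back to the runtime.
      }
      *out = cache[index].string;  // Hit: reuse the cached string.
      return true;
    }

    int main() {
      std::vector<CacheEntry> cache(8);
      cache[5 & 7] = {5, "5"};
      std::string s;
      std::printf("%d %s\n", LookupNumberStringCache(cache, 5, &s),
                  s.c_str());
      return 0;
    }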
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ __ mov(ebx, Operand(esp, kPointerSize));
+
+ // Generate code to lookup number in the number string cache.
+ GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
+ __ ret(1 * kPointerSize);
+
+ __ bind(&runtime);
+ // Handle number to string in the runtime system if not found in the cache.
+ __ TailCallRuntime(ExternalReference(Runtime::kNumberToString), 1, 1);
+}
+
+
void CompareStub::Generate(MacroAssembler* masm) {
Label call_builtin, done;
@@ -9077,6 +9470,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Slow-case: Non-function called.
__ bind(&slow);
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
__ Set(eax, Immediate(argc_));
__ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
@@ -9650,13 +10046,34 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// ecx: length of second string
// edx: second string
// Look at the length of the result of adding the two strings.
- Label string_add_flat_result;
+ Label string_add_flat_result, longer_than_two;
__ bind(&both_not_zero_length);
__ add(ebx, Operand(ecx));
// When the result is a two character string, try to find it in the
// symbol table first to avoid allocating a new string for it.
__ cmp(ebx, 2);
- __ j(equal, &string_add_runtime);
+ __ j(not_equal, &longer_than_two);
+
+ // Check that both strings are non-external ascii strings.
+ __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
+ &string_add_runtime);
+
+ // Get the two characters forming the new string.
+ __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
+ __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+
+ // Try to lookup two character string in symbol table. If it is not found
+ // just allocate a new one.
+ Label make_two_character_string, make_flat_ascii_string;
+ GenerateTwoCharacterSymbolTableProbe(masm, ebx, ecx, eax, edx, edi,
+ &make_two_character_string);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&make_two_character_string);
+ __ Set(ebx, Immediate(2));
+ __ jmp(&make_flat_ascii_string);
+
+ __ bind(&longer_than_two);
// Check if resulting string will be flat.
__ cmp(ebx, String::kMinNonFlatLength);
__ j(below, &string_add_flat_result);
@@ -9723,7 +10140,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ test(ecx, Immediate(kAsciiStringTag));
__ j(zero, &string_add_runtime);
+
+ __ bind(&make_flat_ascii_string);
// Both strings are ascii strings. As they are short they are both flat.
+ // ebx: length of resulting flat string
__ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
// eax: result string
__ mov(ecx, eax);
@@ -9880,6 +10300,190 @@ void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
}
+void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ // Register scratch3 is the general scratch register in this function.
+ Register scratch = scratch3;
+
+ // Make sure that both characters are not digits, as such strings have a
+ // different hash algorithm. Don't try to look these up in the symbol table.
+ Label not_array_index;
+ __ mov(scratch, c1);
+ __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+ __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+ __ j(above, &not_array_index);
+ __ mov(scratch, c2);
+ __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+ __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+ __ j(below_equal, not_found);
+
+ __ bind(&not_array_index);
+ // Calculate the two character string hash.
+ Register hash = scratch1;
+ GenerateHashInit(masm, hash, c1, scratch);
+ GenerateHashAddCharacter(masm, hash, c2, scratch);
+ GenerateHashGetHash(masm, hash, scratch);
+
+ // Collect the two characters in a register.
+ Register chars = c1;
+ __ shl(c2, kBitsPerByte);
+ __ or_(chars, Operand(c2));
+
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string.
+
+ // Load the symbol table.
+ Register symbol_table = c2;
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
+ __ mov(symbol_table,
+ Operand::StaticArray(scratch, times_pointer_size, roots_address));
+
+ // Calculate capacity mask from the symbol table capacity.
+ Register mask = scratch2;
+ static const int kCapacityOffset =
+ FixedArray::kHeaderSize +
+ SymbolTable::kCapacityIndex * kPointerSize;
+ __ mov(mask, FieldOperand(symbol_table, kCapacityOffset));
+ __ SmiUntag(mask);
+ __ sub(Operand(mask), Immediate(1));
+
+ // Registers
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string
+ // symbol_table: symbol table
+ // mask: capacity mask
+ // scratch: -
+
+ // Perform a number of probes in the symbol table.
+ static const int kProbes = 4;
+ Label found_in_symbol_table;
+ Label next_probe[kProbes], next_probe_pop_mask[kProbes];
+ for (int i = 0; i < kProbes; i++) {
+ // Calculate entry in symbol table.
+ __ mov(scratch, hash);
+ if (i > 0) {
+ __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
+ }
+ __ and_(scratch, Operand(mask));
+
+ // Load the entry from the symbol table.
+ Register candidate = scratch; // Scratch register contains candidate.
+ ASSERT_EQ(1, SymbolTableShape::kEntrySize);
+ static const int kFirstElementOffset =
+ FixedArray::kHeaderSize +
+ SymbolTable::kPrefixStartIndex * kPointerSize +
+ SymbolTableShape::kPrefixSize * kPointerSize;
+ __ mov(candidate,
+ FieldOperand(symbol_table,
+ scratch,
+ times_pointer_size,
+ kFirstElementOffset));
+
+ // If entry is undefined no string with this hash can be found.
+ __ cmp(candidate, Factory::undefined_value());
+ __ j(equal, not_found);
+
+ // If length is not 2 the string is not a candidate.
+ __ cmp(FieldOperand(candidate, String::kLengthOffset), Immediate(2));
+ __ j(not_equal, &next_probe[i]);
+
+ // As we are out of registers save the mask on the stack and use that
+ // register as a temporary.
+ __ push(mask);
+ Register temp = mask;
+
+ // Check that the candidate is a non-external ascii string.
+ __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
+ __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(
+ temp, temp, &next_probe_pop_mask[i]);
+
+ // Check if the two characters match.
+ __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ and_(temp, 0x0000ffff);
+ __ cmp(chars, Operand(temp));
+ __ j(equal, &found_in_symbol_table);
+ __ bind(&next_probe_pop_mask[i]);
+ __ pop(mask);
+ __ bind(&next_probe[i]);
+ }
+
+ // No matching 2 character string found by probing.
+ __ jmp(not_found);
+
+ // Scratch register contains result when we fall through to here.
+ Register result = scratch;
+ __ bind(&found_in_symbol_table);
+ __ pop(mask); // Pop the temporarily saved mask from the stack.
+ if (!result.is(eax)) {
+ __ mov(eax, result);
+ }
+}
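+
Stripped of the register juggling, the probe is a bounded open-addressing search: the two-character hash picks a start slot, a handful of quadratic probes either find an equal two-character string or hit an undefined slot, and exhausting the probe budget means the caller allocates the string itself. A standalone model (illustrative container; the stand-in hash below replaces the real string hash shown next):

    #include <cstdio>
    #include <string>
    #include <vector>

    bool ProbeSymbolTable(const std::vector<std::string>& table,
                          char c1, char c2, std::string* out) {
      unsigned mask = static_cast<unsigned>(table.size()) - 1;
      unsigned hash = (static_cast<unsigned>(c1) * 31u) ^
                      static_cast<unsigned>(c2);  // Stand-in hash.
      static const int kProbes = 4;
      for (int i = 0; i < kProbes; ++i) {
        // Quadratic offsets, in the spirit of GetProbeOffset(i).
        unsigned entry = (hash + i * (i + 1) / 2) & mask;
        const std::string& candidate = table[entry];
        if (candidate.empty()) return false;  // Undefined slot: not found.
        if (candidate.size() == 2 && candidate[0] == c1 &&
            candidate[1] == c2) {
          *out = candidate;  // Found the interned string.
          return true;
        }
      }
      return false;  // Probe budget exhausted: caller allocates.
    }

    int main() {
      std::vector<std::string> table(16);
      table[((static_cast<unsigned>('o') * 31u) ^ 'k') & 15] = "ok";
      std::string s;
      std::printf("%d %s\n", ProbeSymbolTable(table, 'o', 'k', &s),
                  s.c_str());
      return 0;
    }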
+
+
+void StringStubBase::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash = character + (character << 10);
+ __ mov(hash, character);
+ __ shl(hash, 10);
+ __ add(hash, Operand(character));
+ // hash ^= hash >> 6;
+ __ mov(scratch, hash);
+ __ sar(scratch, 6);
+ __ xor_(hash, Operand(scratch));
+}
+
+
+void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash += character;
+ __ add(hash, Operand(character));
+ // hash += hash << 10;
+ __ mov(scratch, hash);
+ __ shl(scratch, 10);
+ __ add(hash, Operand(scratch));
+ // hash ^= hash >> 6;
+ __ mov(scratch, hash);
+ __ sar(scratch, 6);
+ __ xor_(hash, Operand(scratch));
+}
+
+
+void StringStubBase::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch) {
+ // hash += hash << 3;
+ __ mov(scratch, hash);
+ __ shl(scratch, 3);
+ __ add(hash, Operand(scratch));
+ // hash ^= hash >> 11;
+ __ mov(scratch, hash);
+ __ sar(scratch, 11);
+ __ xor_(hash, Operand(scratch));
+ // hash += hash << 15;
+ __ mov(scratch, hash);
+ __ shl(scratch, 15);
+ __ add(hash, Operand(scratch));
+
+ // if (hash == 0) hash = 27;
+ Label hash_not_zero;
+ __ test(hash, Operand(hash));
+ __ j(not_zero, &hash_not_zero);
+ __ mov(hash, Immediate(27));
+ __ bind(&hash_not_zero);
+}
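+
These three helpers are the add/shift/xor string hash (a Jenkins-style one-at-a-time variant) split into init, per-character, and finalization steps. Restated in plain C++ for readability (logical shifts on uint32_t; the emitted code uses sar, which only diverges once the hash's sign bit is set):

    #include <cstdint>
    #include <cstdio>

    uint32_t HashInit(uint32_t character) {
      uint32_t hash = character + (character << 10);
      hash ^= hash >> 6;
      return hash;
    }

    uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
      hash += character;
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }

    uint32_t HashGetHash(uint32_t hash) {
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      if (hash == 0) hash = 27;  // Zero is reserved; remap as the stub does.
      return hash;
    }

    int main() {
      uint32_t h = HashGetHash(HashAddCharacter(HashInit('o'), 'k'));
      std::printf("%u\n", h);
      return 0;
    }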
+
+
void SubStringStub::Generate(MacroAssembler* masm) {
Label runtime;
@@ -9900,26 +10504,55 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// eax: string
// ebx: instance type
// Calculate length of sub string using the smi values.
- __ mov(ecx, Operand(esp, 1 * kPointerSize)); // to
+ Label result_longer_than_two;
+ __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &runtime);
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // from
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
__ test(edx, Immediate(kSmiTagMask));
__ j(not_zero, &runtime);
__ sub(ecx, Operand(edx));
- // Handle sub-strings of length 2 and less in the runtime system.
+ // Special handling of sub-strings of length 1 and 2. One character strings
+ // are handled in the runtime system (looked up in the single character
+ // cache). Two character strings are looked up in the symbol table.
__ SmiUntag(ecx); // Result length is no longer smi.
__ cmp(ecx, 2);
- __ j(below_equal, &runtime);
+ __ j(greater, &result_longer_than_two);
+ __ j(less, &runtime);
+ // Sub string of length 2 requested.
+ // eax: string
+ // ebx: instance type
+ // ecx: sub string length (value is 2)
+ // edx: from index (smi)
+ __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
+
+ // Get the two characters forming the sub string.
+ __ SmiUntag(edx); // From index is no longer smi.
+ __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
+ __ movzx_b(ecx,
+ FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
+
+ // Try to lookup two character string in symbol table.
+ Label make_two_character_string;
+ GenerateTwoCharacterSymbolTableProbe(masm, ebx, ecx, eax, edx, edi,
+ &make_two_character_string);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&make_two_character_string);
+ // Set up registers for allocating the two character string.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ Set(ecx, Immediate(2));
+
+ __ bind(&result_longer_than_two);
// eax: string
// ebx: instance type
// ecx: result string length
// Check for flat ascii string
Label non_ascii_flat;
- __ and_(ebx, kStringRepresentationMask | kStringEncodingMask);
- __ cmp(ebx, kSeqStringTag | kAsciiStringTag);
- __ j(not_equal, &non_ascii_flat);
+ __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
// Allocate the result.
__ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
@@ -9967,7 +10600,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
__ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(Operand(esi),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
// As from is a smi it is 2 times the value which matches the size of a two
// byte character.
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 956f42433..a6cb3164b 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -305,19 +305,15 @@ class CodeGenerator: public AstVisitor {
// Takes a function literal, generates code for it. This function should only
// be called by compiler.cc.
- static Handle<Code> MakeCode(FunctionLiteral* fun,
- Handle<Script> script,
- bool is_eval,
- CompilationInfo* info);
+ static Handle<Code> MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(FunctionLiteral* fun);
+ static void MakeCodePrologue(CompilationInfo* info);
// Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
- MacroAssembler* masm,
+ static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
Code::Flags flags,
- Handle<Script> script);
+ CompilationInfo* info);
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
@@ -328,7 +324,7 @@ class CodeGenerator: public AstVisitor {
// Accessors
MacroAssembler* masm() { return masm_; }
VirtualFrame* frame() const { return frame_; }
- Handle<Script> script() { return script_; }
+ inline Handle<Script> script();
bool has_valid_frame() const { return frame_ != NULL; }
@@ -352,11 +348,11 @@ class CodeGenerator: public AstVisitor {
private:
// Construction/Destruction
- CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
+ explicit CodeGenerator(MacroAssembler* masm);
// Accessors
- Scope* scope() const { return scope_; }
- bool is_eval() { return is_eval_; }
+ inline bool is_eval();
+ Scope* scope();
// Generating deferred code.
void ProcessDeferred();
@@ -388,7 +384,7 @@ class CodeGenerator: public AstVisitor {
void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
- void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
+ void Generate(CompilationInfo* info, Mode mode);
// Generate the return sequence code. Should be called no more than
// once per compiled function, immediately after binding the return
@@ -396,7 +392,7 @@ class CodeGenerator: public AstVisitor {
void GenerateReturnSequence(Result* return_value);
// Returns the arguments allocation mode.
- ArgumentsAllocationMode ArgumentsMode() const;
+ ArgumentsAllocationMode ArgumentsMode();
// Store the arguments object and allocate it if necessary.
Result StoreArgumentsObject(bool initial);
@@ -433,8 +429,8 @@ class CodeGenerator: public AstVisitor {
void LoadAndSpill(Expression* expression);
// Read a value from a slot and leave it on top of the expression stack.
- void LoadFromSlot(Slot* slot, TypeofState typeof_state);
- void LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
+ Result LoadFromSlot(Slot* slot, TypeofState typeof_state);
+ Result LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
JumpTarget* slow);
@@ -443,10 +439,22 @@ class CodeGenerator: public AstVisitor {
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
- // Load a property of an object, returning it in a Result.
- // The object and the property name are passed on the stack, and
- // not changed.
- Result EmitKeyedLoad(bool is_global);
+ // Support for compiling assignment expressions.
+ void EmitSlotAssignment(Assignment* node);
+ void EmitNamedPropertyAssignment(Assignment* node);
+ void EmitKeyedPropertyAssignment(Assignment* node);
+
+ // Receiver is passed on the frame and consumed.
+ Result EmitNamedLoad(Handle<String> name, bool is_contextual);
+
+ // Receiver and value are passed on the frame and consumed.
+ Result EmitNamedStore(Handle<String> name);
+
+ // Receiver and key are passed on the frame and consumed.
+ Result EmitKeyedLoad();
+
+ // Receiver, key, and value are passed on the frame and consumed.
+ Result EmitKeyedStore(StaticType* key_type);
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
@@ -537,7 +545,7 @@ class CodeGenerator: public AstVisitor {
void DeclareGlobals(Handle<FixedArray> pairs);
// Instantiate the function boilerplate.
- void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+ Result InstantiateBoilerplate(Handle<JSFunction> boilerplate);
// Support for type checks.
void GenerateIsSmi(ZoneList<Expression*>* args);
@@ -584,6 +592,9 @@ class CodeGenerator: public AstVisitor {
// Support for direct calls from JavaScript to native RegExp code.
void GenerateRegExpExec(ZoneList<Expression*>* args);
+ // Fast support for number to string.
+ void GenerateNumberToString(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -607,15 +618,14 @@ class CodeGenerator: public AstVisitor {
bool HasValidEntryRegisters();
#endif
- bool is_eval_; // Tells whether code is generated for eval.
- Handle<Script> script_;
ZoneList<DeferredCode*> deferred_;
// Assembler
MacroAssembler* masm_; // to generate code
+ CompilationInfo* info_;
+
// Code generation state
- Scope* scope_;
VirtualFrame* frame_;
RegisterAllocator* allocator_;
CodeGenState* state_;
@@ -663,13 +673,15 @@ class GenericBinaryOpStub: public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
- GenericBinaryFlags flags)
+ GenericBinaryFlags flags,
+ NumberInfo::Type operands_type = NumberInfo::kUnknown)
: op_(op),
mode_(mode),
flags_(flags),
args_in_registers_(false),
args_reversed_(false),
- name_(NULL) {
+ name_(NULL),
+ operands_type_(operands_type) {
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -694,28 +706,32 @@ class GenericBinaryOpStub: public CodeStub {
bool args_reversed_; // Left and right arguments are swapped.
bool use_sse3_;
char* name_;
+ NumberInfo::Type operands_type_; // Number type information of operands.
const char* GetName();
#ifdef DEBUG
void Print() {
- PrintF("GenericBinaryOpStub (op %s), "
- "(mode %d, flags %d, registers %d, reversed %d)\n",
+ PrintF("GenericBinaryOpStub %d (op %s), "
+ "(mode %d, flags %d, registers %d, reversed %d, number_info %s)\n",
+ MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
static_cast<int>(flags_),
static_cast<int>(args_in_registers_),
- static_cast<int>(args_reversed_));
+ static_cast<int>(args_reversed_),
+ NumberInfo::ToString(operands_type_));
}
#endif
- // Minor key encoding in 16 bits FRASOOOOOOOOOOMM.
+ // Minor key encoding in 16 bits NNNFRASOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 10> {};
- class SSE3Bits: public BitField<bool, 12, 1> {};
- class ArgsInRegistersBits: public BitField<bool, 13, 1> {};
- class ArgsReversedBits: public BitField<bool, 14, 1> {};
- class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class SSE3Bits: public BitField<bool, 9, 1> {};
+ class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
+ class ArgsReversedBits: public BitField<bool, 11, 1> {};
+ class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
+ class NumberInfoBits: public BitField<NumberInfo::Type, 13, 3> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
@@ -725,7 +741,8 @@ class GenericBinaryOpStub: public CodeStub {
| FlagBits::encode(flags_)
| SSE3Bits::encode(use_sse3_)
| ArgsInRegistersBits::encode(args_in_registers_)
- | ArgsReversedBits::encode(args_reversed_);
+ | ArgsReversedBits::encode(args_reversed_)
+ | NumberInfoBits::encode(operands_type_);
}
void Generate(MacroAssembler* masm);
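For reference, MinorKey() packs all stub parameters into the 16-bit layout NNNFRASOOOOOOOMM described above. A minimal standalone sketch of the same BitField-style packing, with field widths taken from the declarations (the template here is a simplification of V8's real BitField, everything else illustrative):

    #include <cassert>
    #include <cstdint>

    // Simplified BitField: a field of kSize bits starting at bit kShift.
    template <int kShift, int kSize>
    struct BitField {
      static uint32_t encode(uint32_t value) {
        assert(value < (1u << kSize));  // value must fit in the field
        return value << kShift;
      }
      static uint32_t decode(uint32_t key) {
        return (key >> kShift) & ((1u << kSize) - 1);
      }
    };

    // Field layout from the class above: NNN F R A S OOOOOOO MM.
    typedef BitField<0, 2>  ModeBits;
    typedef BitField<2, 7>  OpBits;
    typedef BitField<9, 1>  SSE3Bits;
    typedef BitField<10, 1> ArgsInRegistersBits;
    typedef BitField<11, 1> ArgsReversedBits;
    typedef BitField<12, 1> FlagBits;
    typedef BitField<13, 3> NumberInfoBits;

    uint32_t MinorKey(uint32_t mode, uint32_t op, bool sse3, bool in_regs,
                      bool reversed, uint32_t flag, uint32_t number_info) {
      return ModeBits::encode(mode)
           | OpBits::encode(op)
           | SSE3Bits::encode(sse3)
           | ArgsInRegistersBits::encode(in_regs)
           | ArgsReversedBits::encode(reversed)
           | FlagBits::encode(flag)
           | NumberInfoBits::encode(number_info);
    }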
@@ -750,13 +767,6 @@ class GenericBinaryOpStub: public CodeStub {
};
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
-};
-
-
class StringStubBase: public CodeStub {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -779,6 +789,38 @@ class StringStubBase: public CodeStub {
Register count, // Must be ecx.
Register scratch, // Neither of the above.
bool ascii);
+
+ // Probe the symbol table for a two character string. If the string is not
+ // found by probing, a jump to the label not_found is performed. This jump
+ // does not guarantee that the string is not in the symbol table. If the
+ // string is found, the code falls through with the string in register eax.
+ void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
+ // Generate string hash.
+ void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch);
+};
+
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+ NO_STRING_ADD_FLAGS = 0,
+ NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
};
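The three hash helpers declared above split an incremental string hash into init, add-character, and finalize phases so a stub can hash characters as it copies them. A sketch of the scheme, assuming a Jenkins one-at-a-time style hash; the exact constants belong to the runtime's string hasher and are illustrative here:

    #include <cstdint>

    // Start the running hash with the first character.
    uint32_t HashInit(uint32_t character) {
      uint32_t hash = character;
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }

    // Mix one more character into the running hash.
    uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
      hash += character;
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }

    // Finalize the running hash into the stored hash value.
    uint32_t HashGetHash(uint32_t hash) {
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      return hash;
    }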
@@ -833,6 +875,39 @@ class StringCompareStub: public StringStubBase {
};
+class NumberToStringStub: public CodeStub {
+ public:
+ NumberToStringStub() { }
+
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the object register is found in the cache, the generated code falls
+ // through with the result in the result register. The object and the result
+ // register can be the same. If the number is not found in the cache, the
+ // code jumps to the label not_found with only the content of the object
+ // register unchanged.
+ static void GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ bool object_is_smi,
+ Label* not_found);
+
+ private:
+ Major MajorKey() { return NumberToString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "NumberToStringStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("NumberToStringStub\n");
+ }
+#endif
+};
+
+
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
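NumberToStringStub's cache probe above implements, in assembly, a direct-mapped number-to-string cache. A rough portable sketch of the idea; the layout, hashing, and names here are illustrative stand-ins, not V8's actual heap structures:

    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <vector>

    struct NumberStringCache {
      struct Entry { bool used; double number; std::string string; };
      std::vector<Entry> entries;  // size must be a power of two

      explicit NumberStringCache(size_t size) : entries(size) {}

      size_t IndexFor(double number) const {
        // V8 hashes smis from their integer value; this sketch just folds
        // the double's bit pattern.
        uint64_t bits;
        std::memcpy(&bits, &number, sizeof(bits));
        return static_cast<size_t>(bits ^ (bits >> 32)) &
               (entries.size() - 1);
      }

      // True on a hit, filling *result; a miss corresponds to the stub's
      // jump to the not_found label (the caller then asks the runtime).
      bool Lookup(double number, std::string* result) const {
        const Entry& entry = entries[IndexFor(number)];
        if (!entry.used || entry.number != number) return false;
        *result = entry.string;
        return true;
      }
    };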
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index 1f34b3026..a9e26263f 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -125,9 +125,10 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Register state for IC load call (from ic-ia32.cc).
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, ecx.bit(), false);
+ Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), false);
}
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index cb500d564..a3b701645 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -1014,7 +1014,6 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
const char* mnem = NULL;
- printf("%d\n", regop);
switch (regop) {
case 5: mnem = "subb"; break;
case 7: mnem = "cmpb"; break;
diff --git a/deps/v8/src/ia32/fast-codegen-ia32.cc b/deps/v8/src/ia32/fast-codegen-ia32.cc
index 2a15733ae..9bab75aa6 100644
--- a/deps/v8/src/ia32/fast-codegen-ia32.cc
+++ b/deps/v8/src/ia32/fast-codegen-ia32.cc
@@ -35,79 +35,152 @@ namespace internal {
#define __ ACCESS_MASM(masm())
-void FastCodeGenerator::EmitLoadReceiver(Register reg) {
+Register FastCodeGenerator::accumulator0() { return eax; }
+Register FastCodeGenerator::accumulator1() { return edx; }
+Register FastCodeGenerator::scratch0() { return ecx; }
+Register FastCodeGenerator::scratch1() { return edi; }
+Register FastCodeGenerator::receiver_reg() { return ebx; }
+Register FastCodeGenerator::context_reg() { return esi; }
+
+
+void FastCodeGenerator::EmitLoadReceiver() {
// Offset 2 is due to return address and saved frame pointer.
int index = 2 + function()->scope()->num_parameters();
- __ mov(reg, Operand(ebp, index * kPointerSize));
+ __ mov(receiver_reg(), Operand(ebp, index * kPointerSize));
}
-void FastCodeGenerator::EmitReceiverMapCheck() {
- Comment cmnt(masm(), ";; MapCheck(this)");
- if (FLAG_print_ir) {
- PrintF("MapCheck(this)\n");
- }
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
+ ASSERT(!destination().is(no_reg));
+ ASSERT(cell->IsJSGlobalPropertyCell());
- EmitLoadReceiver(edx);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, bailout());
+ __ mov(destination(), Immediate(cell));
+ __ mov(destination(),
+ FieldOperand(destination(), JSGlobalPropertyCell::kValueOffset));
+ if (FLAG_debug_code) {
+ __ cmp(destination(), Factory::the_hole_value());
+ __ Check(not_equal, "DontDelete cells can't contain the hole");
+ }
- ASSERT(has_receiver() && receiver()->IsHeapObject());
- Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
- Handle<Map> map(object->map());
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Immediate(map));
- __ j(not_equal, bailout());
+ // The loaded value is not known to be a smi.
+ clear_as_smi(destination());
}
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
- // Compile global variable accesses as load IC calls. The only live
- // registers are esi (context) and possibly edx (this). Both are also
- // saved in the stack and esi is preserved by the call.
- __ push(CodeGenerator::GlobalObject());
- __ mov(ecx, name);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- if (has_this_properties()) {
- // Restore this.
- EmitLoadReceiver(edx);
+void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+ LookupResult lookup;
+ info()->receiver()->Lookup(*name, &lookup);
+
+ ASSERT(lookup.holder() == *info()->receiver());
+ ASSERT(lookup.type() == FIELD);
+ Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
+ int index = lookup.GetFieldIndex() - map->inobject_properties();
+ int offset = index * kPointerSize;
+
+ // We will emit the write barrier unless the stored value is statically
+ // known to be a smi.
+ bool needs_write_barrier = !is_smi(accumulator0());
+
+ // Perform the store. Negative offsets are inobject properties.
+ if (offset < 0) {
+ offset += map->instance_size();
+ __ mov(FieldOperand(receiver_reg(), offset), accumulator0());
+ if (needs_write_barrier) {
+ // Preserve receiver from write barrier.
+ __ mov(scratch0(), receiver_reg());
+ }
} else {
- __ nop(); // Not test eax, indicates IC has no inlined code at call site.
+ offset += FixedArray::kHeaderSize;
+ __ mov(scratch0(),
+ FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
+ __ mov(FieldOperand(scratch0(), offset), accumulator0());
+ }
+
+ if (needs_write_barrier) {
+ if (destination().is(no_reg)) {
+ // After RecordWrite accumulator0 is only accidentally a smi, but it is
+ // already marked as not known to be one.
+ __ RecordWrite(scratch0(), offset, accumulator0(), scratch1());
+ } else {
+ // Copy the value to the other accumulator so that a copy survives the
+ // write barrier. One of the accumulators is available as a scratch
+ // register. Neither is a smi.
+ __ mov(accumulator1(), accumulator0());
+ clear_as_smi(accumulator1());
+ Register value_scratch = other_accumulator(destination());
+ __ RecordWrite(scratch0(), offset, value_scratch, scratch1());
+ }
+ } else if (destination().is(accumulator1())) {
+ __ mov(accumulator1(), accumulator0());
+ // Is a smi because we do not need the write barrier.
+ set_as_smi(accumulator1());
}
}
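Both the property store above and the property load below share the same field-offset arithmetic: the lookup's field index counts in-object fields first, so after subtracting the in-object count a negative offset lands inside the object itself and a non-negative one lands in the external properties array. A sketch of that computation under those assumptions:

    // kPointerSize is 4 on ia32.
    const int kPointerSize = 4;

    struct FieldLocation {
      bool inobject;  // true: offset is from the object's start
      int offset;     // byte offset (into object or properties array)
    };

    FieldLocation LocateField(int field_index, int inobject_properties,
                              int instance_size, int header_size) {
      int index = field_index - inobject_properties;
      int offset = index * kPointerSize;
      if (offset < 0) {
        // Negative offsets are in-object properties.
        return FieldLocation{true, offset + instance_size};
      }
      // Otherwise the field lives in the properties backing store.
      return FieldLocation{false, offset + header_size};
    }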
-void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
+ ASSERT(!destination().is(no_reg));
LookupResult lookup;
- receiver()->Lookup(*name, &lookup);
+ info()->receiver()->Lookup(*name, &lookup);
- ASSERT(lookup.holder() == *receiver());
+ ASSERT(lookup.holder() == *info()->receiver());
ASSERT(lookup.type() == FIELD);
- Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+ Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
int index = lookup.GetFieldIndex() - map->inobject_properties();
int offset = index * kPointerSize;
- // Negative offsets are inobject properties.
+ // Perform the load. Negative offsets are inobject properties.
if (offset < 0) {
offset += map->instance_size();
- __ mov(ecx, edx); // Copy receiver for write barrier.
+ __ mov(destination(), FieldOperand(receiver_reg(), offset));
} else {
offset += FixedArray::kHeaderSize;
- __ mov(ecx, FieldOperand(edx, JSObject::kPropertiesOffset));
+ __ mov(scratch0(),
+ FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
+ __ mov(destination(), FieldOperand(scratch0(), offset));
+ }
+
+ // The loaded value is not known to be a smi.
+ clear_as_smi(destination());
+}
+
+
+void FastCodeGenerator::EmitBitOr() {
+ if (is_smi(accumulator0()) && is_smi(accumulator1())) {
+ // If both operands are known to be a smi then there is no need to check
+ // the operands or result. There is no need to perform the operation in
+ // an effect context.
+ if (!destination().is(no_reg)) {
+ // Leave the result in the destination register. Bitwise or is
+ // commutative.
+ __ or_(destination(), Operand(other_accumulator(destination())));
+ }
+ } else if (destination().is(no_reg)) {
+ // Result is not needed but do not clobber the operands in case of
+ // bailout.
+ __ mov(scratch0(), accumulator1());
+ __ or_(scratch0(), Operand(accumulator0()));
+ __ test(scratch0(), Immediate(kSmiTagMask));
+ __ j(not_zero, bailout(), not_taken);
+ } else {
+ // Preserve the destination operand in a scratch register in case of
+ // bailout.
+ __ mov(scratch0(), destination());
+ __ or_(destination(), Operand(other_accumulator(destination())));
+ __ test(destination(), Immediate(kSmiTagMask));
+ __ j(not_zero, bailout(), not_taken);
}
- // Perform the store.
- __ mov(FieldOperand(ecx, offset), eax);
- // Preserve value from write barrier in case it's needed.
- __ mov(ebx, eax);
- __ RecordWrite(ecx, offset, ebx, edi);
+
+ // If we didn't bail out, the result (in fact, both inputs too) is known to
+ // be a smi.
+ set_as_smi(accumulator0());
+ set_as_smi(accumulator1());
}
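EmitBitOr can stay entirely in the tagged domain because of how smis are encoded: with the payload shifted left by one and a zero tag bit, or-ing two tagged smis equals the tagged or of their payloads. A sketch of that invariant, assuming the 32-bit smi encoding:

    #include <cstdint>

    const intptr_t kSmiTagMask = 1;  // tag bit 0; zero means smi

    bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMask) == 0; }
    intptr_t SmiTag(intptr_t value) { return value * 2; }    // value << 1
    intptr_t SmiUntag(intptr_t tagged) { return tagged / 2; }

    // a | b == SmiTag(SmiUntag(a) | SmiUntag(b)) whenever both are smis,
    // so the stub only has to verify the operands are smis (else bail out).
    bool SmiBitOr(intptr_t a, intptr_t b, intptr_t* result) {
      if (!IsSmi(a) || !IsSmi(b)) return false;  // the bailout path
      *result = a | b;
      return true;
    }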
-void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
- ASSERT(function_ == NULL);
+void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
ASSERT(info_ == NULL);
- function_ = fun;
- info_ = info;
+ info_ = compilation_info;
// Save the caller's frame pointer and set up our own.
Comment prologue_cmnt(masm(), ";; Prologue");
@@ -118,18 +191,42 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
// Note that we keep a live register reference to esi (context) at this
// point.
- // Receiver (this) is allocated to edx if there are this properties.
- if (has_this_properties()) EmitReceiverMapCheck();
+ // Receiver (this) is allocated to a fixed register.
+ if (info()->has_this_properties()) {
+ Comment cmnt(masm(), ";; MapCheck(this)");
+ if (FLAG_print_ir) {
+ PrintF("#: MapCheck(this)\n");
+ }
+ ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
+ Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
+ Handle<Map> map(object->map());
+ EmitLoadReceiver();
+ __ CheckMap(receiver_reg(), map, bailout(), false);
+ }
+
+ // If there is a global variable access, check that the global object is
+ // the same as at lazy-compilation time.
+ if (info()->has_globals()) {
+ Comment cmnt(masm(), ";; MapCheck(GLOBAL)");
+ if (FLAG_print_ir) {
+ PrintF("#: MapCheck(GLOBAL)\n");
+ }
+ ASSERT(info()->has_global_object());
+ Handle<Map> map(info()->global_object()->map());
+ __ mov(scratch0(), CodeGenerator::GlobalObject());
+ __ CheckMap(scratch0(), map, bailout(), true);
+ }
- VisitStatements(fun->body());
+ VisitStatements(function()->body());
Comment return_cmnt(masm(), ";; Return(<undefined>)");
+ if (FLAG_print_ir) {
+ PrintF("#: Return(<undefined>)\n");
+ }
__ mov(eax, Factory::undefined_value());
-
- Comment epilogue_cmnt(masm(), ";; Epilogue");
__ mov(esp, ebp);
__ pop(ebp);
- __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+ __ ret((scope()->num_parameters() + 1) * kPointerSize);
__ bind(&bailout_);
}
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 9f9ac56cc..2394bed62 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -51,9 +51,10 @@ namespace internal {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-ia32.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
- function_ = fun;
- SetFunctionPosition(fun);
+void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+ ASSERT(info_ == NULL);
+ info_ = info;
+ SetFunctionPosition(function());
if (mode == PRIMARY) {
__ push(ebp); // Caller's frame pointer.
@@ -62,7 +63,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
__ push(edi); // Callee's JS Function.
{ Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = fun->scope()->num_stack_slots();
+ int locals_count = scope()->num_stack_slots();
if (locals_count == 1) {
__ push(Immediate(Factory::undefined_value()));
} else if (locals_count > 1) {
@@ -76,7 +77,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
bool function_in_register = true;
// Possibly allocate a local context.
- if (fun->scope()->num_heap_slots() > 0) {
+ if (scope()->num_heap_slots() > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in edi.
__ push(edi);
@@ -87,9 +88,9 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
// Copy parameters into context if necessary.
- int num_parameters = fun->scope()->num_parameters();
+ int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
- Slot* slot = fun->scope()->parameter(i)->slot();
+ Slot* slot = scope()->parameter(i)->slot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -107,7 +108,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
}
}
- Variable* arguments = fun->scope()->arguments()->AsVariable();
+ Variable* arguments = scope()->arguments()->AsVariable();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
@@ -117,10 +118,11 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
// Receiver is just before the parameters on the caller's stack.
- __ lea(edx, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
- fun->num_parameters() * kPointerSize));
+ int offset = scope()->num_parameters() * kPointerSize;
+ __ lea(edx,
+ Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(edx);
- __ push(Immediate(Smi::FromInt(fun->num_parameters())));
+ __ push(Immediate(Smi::FromInt(scope()->num_parameters())));
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
@@ -130,13 +132,13 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
__ mov(ecx, eax); // Duplicate result.
Move(arguments->slot(), eax, ebx, edx);
Slot* dot_arguments_slot =
- fun->scope()->arguments_shadow()->AsVariable()->slot();
+ scope()->arguments_shadow()->AsVariable()->slot();
Move(dot_arguments_slot, ecx, ebx, edx);
}
}
{ Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(fun->scope()->declarations());
+ VisitDeclarations(scope()->declarations());
}
{ Comment cmnt(masm_, "[ Stack check");
@@ -156,14 +158,14 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
{ Comment cmnt(masm_, "[ Body");
ASSERT(loop_depth() == 0);
- VisitStatements(fun->body());
+ VisitStatements(function()->body());
ASSERT(loop_depth() == 0);
}
{ Comment cmnt(masm_, "[ return <undefined>;");
// Emit a 'return undefined' in case control fell off the end of the body.
__ mov(eax, Factory::undefined_value());
- EmitReturnSequence(function_->end_position());
+ EmitReturnSequence(function()->end_position());
}
}
@@ -190,7 +192,7 @@ void FullCodeGenerator::EmitReturnSequence(int position) {
// patch with the code required by the debugger.
__ mov(esp, ebp);
__ pop(ebp);
- __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+ __ ret((scope()->num_parameters() + 1) * kPointerSize);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check that the size of the code used for returning matches what is
// expected by the debugger.
@@ -627,7 +629,7 @@ MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
return Operand(ebp, SlotOffset(slot));
case Slot::CONTEXT: {
int context_chain_length =
- function_->scope()->ContextChainLength(slot->var()->scope());
+ scope()->ContextChainLength(slot->var()->scope());
__ LoadContext(scratch, context_chain_length);
return CodeGenerator::ContextOperand(scratch, slot->index());
}
@@ -686,7 +688,7 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
// this specific context.
// The variable in the decl always resides in the current context.
- ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+ ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
if (FLAG_debug_code) {
// Check if we have the correct context pointer.
__ mov(ebx,
@@ -764,7 +766,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(esi); // The context is the first argument.
__ push(Immediate(pairs));
- __ push(Immediate(Smi::FromInt(is_eval_ ? 1 : 0)));
+ __ push(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
__ CallRuntime(Runtime::kDeclareGlobals, 3);
// Return value is ignored.
}
@@ -775,7 +777,7 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
// Build the function boilerplate and instantiate it.
Handle<JSFunction> boilerplate =
- Compiler::BuildBoilerplate(expr, script_, this);
+ Compiler::BuildBoilerplate(expr, script(), this);
if (HasStackOverflow()) return;
ASSERT(boilerplate->IsBoilerplate());
@@ -806,7 +808,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object on the stack.
- __ push(CodeGenerator::GlobalObject());
+ __ mov(eax, CodeGenerator::GlobalObject());
__ mov(ecx, var->name());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
@@ -815,7 +817,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
// Remember that the assembler may choose to do peephole optimization
// (e.g., push/pop elimination).
__ nop();
- DropAndApply(1, context, eax);
+ Apply(context, eax);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
Comment cmnt(masm_, "Lookup slot");
@@ -843,7 +845,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
// Load the object.
MemOperand object_loc = EmitSlotSearch(object_slot, eax);
- __ push(object_loc);
+ __ mov(edx, object_loc);
// Assert that the key is a smi.
Literal* key_literal = property->key()->AsLiteral();
@@ -851,7 +853,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
ASSERT(key_literal->handle()->IsSmi());
// Load the key.
- __ push(Immediate(key_literal->handle()));
+ __ mov(eax, Immediate(key_literal->handle()));
// Do a keyed property load.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -860,7 +862,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
// call. It is treated specially by the LoadIC code.
__ nop();
// Drop key and object left on the stack by IC.
- DropAndApply(2, context, eax);
+ Apply(context, eax);
}
}
@@ -1011,6 +1013,99 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ Comment cmnt(masm_, "[ Assignment");
+ ASSERT(expr->op() != Token::INIT_CONST);
+ // Left-hand side can only be a property, a global, or a (parameter or
+ // local) slot. Variables rewritten to '.arguments' are treated as
+ // KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->target()->AsProperty();
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ if (expr->is_compound()) {
+ // We need the receiver both on the stack and in the accumulator.
+ VisitForValue(prop->obj(), kAccumulator);
+ __ push(result_register());
+ } else {
+ VisitForValue(prop->obj(), kStack);
+ }
+ break;
+ case KEYED_PROPERTY:
+ if (expr->is_compound()) {
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kAccumulator);
+ __ mov(edx, Operand(esp, 0));
+ __ push(eax);
+ } else {
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+ }
+ break;
+ }
+
+ // If we have a compound assignment: get the value of the LHS expression
+ // and store it on top of the stack.
+ if (expr->is_compound()) {
+ Location saved_location = location_;
+ location_ = kStack;
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
+ Expression::kValue);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(prop);
+ __ push(result_register());
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(prop);
+ __ push(result_register());
+ break;
+ }
+ location_ = saved_location;
+ }
+
+ // Evaluate RHS expression.
+ Expression* rhs = expr->value();
+ VisitForValue(rhs, kAccumulator);
+
+ // If we have a compound assignment: Apply operator.
+ if (expr->is_compound()) {
+ Location saved_location = location_;
+ location_ = kAccumulator;
+ EmitBinaryOp(expr->binary_op(), Expression::kValue);
+ location_ = saved_location;
+ }
+
+ // Record source position before possible IC call.
+ SetSourcePosition(expr->position());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ context_);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
+}
+
+
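VisitAssignment first classifies the left-hand side, since that decides both how the receiver and key are materialized and which store path runs at the end. A sketch of the classification, with simplified stand-ins for the AST types:

    #include <cstddef>

    enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };

    struct Property { bool key_is_property_name; };  // AST-node stand-in

    // Mirrors the dispatch at the top of VisitAssignment: no property
    // target means a variable; otherwise the key's shape picks the path.
    LhsKind ClassifyTarget(const Property* prop) {
      if (prop == NULL) return VARIABLE;
      return prop->key_is_property_name ? NAMED_PROPERTY : KEYED_PROPERTY;
    }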
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
@@ -1181,18 +1276,16 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
- // Evaluate the receiver.
- VisitForValue(expr->obj(), kStack);
-
if (key->IsPropertyName()) {
+ VisitForValue(expr->obj(), kAccumulator);
EmitNamedPropertyLoad(expr);
- // Drop receiver left on the stack by IC.
- DropAndApply(1, context_, eax);
+ Apply(context_, eax);
} else {
- VisitForValue(expr->key(), kStack);
+ VisitForValue(expr->obj(), kStack);
+ VisitForValue(expr->key(), kAccumulator);
+ __ pop(edx);
EmitKeyedPropertyLoad(expr);
- // Drop key and receiver left on the stack by IC.
- DropAndApply(2, context_, eax);
+ Apply(context_, eax);
}
}
@@ -1263,25 +1356,31 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Call to a keyed property, use keyed load IC followed by function
// call.
VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
+ VisitForValue(prop->key(), kAccumulator);
// Record source code position for IC call.
SetSourcePosition(prop->position());
+ if (prop->is_synthetic()) {
+ __ pop(edx); // We do not need to keep the receiver.
+ } else {
+ __ mov(edx, Operand(esp, 0)); // Keep receiver, to call function on.
+ }
+
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// By emitting a nop we make sure that we do not have a "test eax,..."
// instruction after the call, as it is treated specially by the LoadIC code.
__ nop();
- // Drop key left on the stack by IC.
- __ Drop(1);
- // Pop receiver.
- __ pop(ebx);
- // Push result (function).
- __ push(eax);
- // Push receiver object on stack.
if (prop->is_synthetic()) {
+ // Push result (function).
+ __ push(eax);
+ // Push Global receiver.
__ mov(ecx, CodeGenerator::GlobalObject());
__ push(FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
} else {
+ // Pop receiver.
+ __ pop(ebx);
+ // Push result (function).
+ __ push(eax);
__ push(ebx);
}
EmitCallWithStub(expr);
@@ -1453,13 +1552,13 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
!proxy->var()->is_this() &&
proxy->var()->is_global()) {
Comment cmnt(masm_, "Global variable");
- __ push(CodeGenerator::GlobalObject());
+ __ mov(eax, CodeGenerator::GlobalObject());
__ mov(ecx, Immediate(proxy->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Use a regular load, not a contextual load, to avoid a reference
// error.
__ call(ic, RelocInfo::CODE_TARGET);
- __ mov(Operand(esp, 0), eax);
+ __ push(eax);
} else if (proxy != NULL &&
proxy->var()->slot() != NULL &&
proxy->var()->slot()->type() == Slot::LOOKUP) {
@@ -1563,11 +1662,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (expr->is_postfix() && context_ != Expression::kEffect) {
__ push(Immediate(Smi::FromInt(0)));
}
- VisitForValue(prop->obj(), kStack);
if (assign_type == NAMED_PROPERTY) {
+ // Put the object both on the stack and in the accumulator.
+ VisitForValue(prop->obj(), kAccumulator);
+ __ push(eax);
EmitNamedPropertyLoad(prop);
} else {
- VisitForValue(prop->key(), kStack);
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kAccumulator);
+ __ mov(edx, Operand(esp, 0));
+ __ push(eax);
EmitKeyedPropertyLoad(prop);
}
}
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 44dae3b4b..fcc82710b 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -50,28 +50,29 @@ namespace internal {
// or if name is not a symbol, and will jump to the miss_label in that case.
static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss_label,
+ Register receiver,
+ Register name,
Register r0,
Register r1,
Register r2,
- Register name,
DictionaryCheck check_dictionary) {
// Register use:
//
+ // name - holds the name of the property and is unchanged.
+ // receiver - holds the receiver and is unchanged.
+ // Scratch registers:
// r0 - used to hold the property dictionary.
//
- // r1 - initially the receiver
- // - used for the index into the property dictionary
+ // r1 - used for the index into the property dictionary
// - holds the result on exit.
//
// r2 - used to hold the capacity of the property dictionary.
- //
- // name - holds the name of the property and is unchanged.
Label done;
// Check for the absence of an interceptor.
// Load the map into r0.
- __ mov(r0, FieldOperand(r1, JSObject::kMapOffset));
+ __ mov(r0, FieldOperand(receiver, JSObject::kMapOffset));
// Test the has_named_interceptor bit in the map.
__ test(FieldOperand(r0, Map::kInstanceAttributesOffset),
Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
@@ -91,7 +92,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
__ j(equal, miss_label, not_taken);
// Load properties array.
- __ mov(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
+ __ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
if (check_dictionary == CHECK_DICTIONARY) {
@@ -176,14 +177,12 @@ const int LoadIC::kOffsetToLoadInstruction = 13;
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
Label miss;
- __ mov(eax, Operand(esp, kPointerSize));
-
StubCompiler::GenerateLoadArrayLength(masm, eax, edx, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -192,15 +191,13 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
void LoadIC::GenerateStringLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
Label miss;
- __ mov(eax, Operand(esp, kPointerSize));
-
- StubCompiler::GenerateLoadStringLength(masm, eax, edx, &miss);
+ StubCompiler::GenerateLoadStringLength(masm, eax, edx, ebx, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
@@ -208,14 +205,12 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
Label miss;
- __ mov(eax, Operand(esp, kPointerSize));
-
StubCompiler::GenerateLoadFunctionPrototype(masm, eax, edx, ebx, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -224,26 +219,22 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : name
- // -- esp[8] : receiver
// -----------------------------------
Label slow, check_string, index_int, index_string;
Label check_pixel_array, probe_dictionary;
- // Load name and receiver.
- __ mov(eax, Operand(esp, kPointerSize));
- __ mov(ecx, Operand(esp, 2 * kPointerSize));
-
// Check that the object isn't a smi.
- __ test(ecx, Immediate(kSmiTagMask));
+ __ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
// Get the map of the receiver.
- __ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
// Check bit field.
- __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
__ test(ebx, Immediate(kSlowCaseBitFieldMask));
__ j(not_zero, &slow, not_taken);
// Check that the object is some kind of JS object EXCEPT JS Value type.
@@ -251,56 +242,58 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// we enter the runtime system to make sure that indexing
// into string objects work as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
- __ cmp(edx, JS_OBJECT_TYPE);
- __ j(less, &slow, not_taken);
+ __ CmpInstanceType(ecx, JS_OBJECT_TYPE);
+ __ j(below, &slow, not_taken);
// Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &check_string, not_taken);
- __ sar(eax, kSmiTagSize);
+ __ mov(ebx, eax);
+ __ SmiUntag(ebx);
// Get the elements array of the object.
__ bind(&index_int);
- __ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+ __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- __ j(not_equal, &check_pixel_array);
+ __ CheckMap(ecx, Factory::fixed_array_map(), &check_pixel_array, true);
// Check that the key (index) is within bounds.
- __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
// Fast case: Do the load.
- __ mov(eax,
- Operand(ecx, eax, times_4, FixedArray::kHeaderSize - kHeapObjectTag));
- __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
+ __ mov(ecx, FieldOperand(ecx, ebx, times_4, FixedArray::kHeaderSize));
+ __ cmp(Operand(ecx), Immediate(Factory::the_hole_value()));
// In case the loaded value is the_hole, we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, &slow);
+ __ mov(eax, ecx);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
__ ret(0);
- // Check whether the elements is a pixel array.
- // eax: untagged index
- // ecx: elements array
__ bind(&check_pixel_array);
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- Immediate(Factory::pixel_array_map()));
- __ j(not_equal, &slow);
- __ cmp(eax, FieldOperand(ecx, PixelArray::kLengthOffset));
+ // Check whether the elements is a pixel array.
+ // edx: receiver
+ // ebx: untagged index
+ // eax: key
+ // ecx: elements
+ __ CheckMap(ecx, Factory::pixel_array_map(), &slow, true);
+ __ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
- __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
- __ movzx_b(eax, Operand(ecx, eax, times_1, 0));
- __ shl(eax, kSmiTagSize);
+ __ mov(eax, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
+ __ movzx_b(eax, Operand(eax, ebx, times_1, 0));
+ __ SmiTag(eax);
__ ret(0);
- // Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
+ // Slow case: jump to runtime.
+ // edx: receiver
+ // eax: key
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
- Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+ GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
// The key is not a smi.
// Is it a string?
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
+ // edx: receiver
+ // eax: key
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &slow);
// Is the string an array index, with cached numeric value?
__ mov(ebx, FieldOperand(eax, String::kHashFieldOffset));
@@ -308,55 +301,58 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ j(not_zero, &index_string, not_taken);
// Is the string a symbol?
- __ movzx_b(ebx, FieldOperand(edx, Map::kInstanceTypeOffset));
+ __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceTypeOffset));
ASSERT(kSymbolTag != 0);
__ test(ebx, Immediate(kIsSymbolMask));
__ j(zero, &slow, not_taken);
// If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary leaving result in ecx.
- __ mov(ebx, FieldOperand(ecx, JSObject::kPropertiesOffset));
+ // cache. Otherwise probe the dictionary.
+ __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Factory::hash_table_map()));
__ j(equal, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
- __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(edx, ebx);
- __ shr(edx, KeyedLookupCache::kMapHashShift);
- __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
- __ shr(eax, String::kHashShift);
- __ xor_(edx, Operand(eax));
- __ and_(edx, KeyedLookupCache::kCapacityMask);
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ mov(ecx, ebx);
+ __ shr(ecx, KeyedLookupCache::kMapHashShift);
+ __ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
+ __ shr(edi, String::kHashShift);
+ __ xor_(ecx, Operand(edi));
+ __ and_(ecx, KeyedLookupCache::kCapacityMask);
// Load the key (consisting of map and symbol) from the cache and
// check for match.
ExternalReference cache_keys
= ExternalReference::keyed_lookup_cache_keys();
- __ mov(edi, edx);
+ __ mov(edi, ecx);
__ shl(edi, kPointerSizeLog2 + 1);
__ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
__ add(Operand(edi), Immediate(kPointerSize));
- __ mov(edi, Operand::StaticArray(edi, times_1, cache_keys));
- __ cmp(edi, Operand(esp, kPointerSize));
+ __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
// Get field offset and check that it is an in-object property.
+ // edx : receiver
+ // ebx : receiver's map
+ // eax : key
+ // ecx : lookup cache index
ExternalReference cache_field_offsets
= ExternalReference::keyed_lookup_cache_field_offsets();
- __ mov(eax,
- Operand::StaticArray(edx, times_pointer_size, cache_field_offsets));
- __ movzx_b(edx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
- __ cmp(eax, Operand(edx));
+ __ mov(edi,
+ Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
+ __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
+ __ cmp(edi, Operand(ecx));
__ j(above_equal, &slow);
// Load in-object property.
- __ sub(eax, Operand(edx));
- __ movzx_b(edx, FieldOperand(ebx, Map::kInstanceSizeOffset));
- __ add(eax, Operand(edx));
- __ mov(eax, FieldOperand(ecx, eax, times_pointer_size, 0));
+ __ sub(edi, Operand(ecx));
+ __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
+ __ add(ecx, Operand(edi));
+ __ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
__ ret(0);
// Do a quick inline probe of the receiver's dictionary, if it
@@ -364,13 +360,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&probe_dictionary);
GenerateDictionaryLoad(masm,
&slow,
- ebx,
- ecx,
edx,
eax,
+ ebx,
+ ecx,
+ edi,
DICTIONARY_CHECK_DONE);
- GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, edx);
- __ mov(eax, Operand(ecx));
+ GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, ebx);
+ __ mov(eax, ecx);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
@@ -381,51 +378,47 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
__ bind(&index_string);
- __ mov(eax, Operand(ebx));
- __ and_(eax, String::kArrayIndexHashMask);
- __ shr(eax, String::kHashShift);
+ __ and_(ebx, String::kArrayIndexHashMask);
+ __ shr(ebx, String::kHashShift);
__ jmp(&index_int);
}
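The keyed lookup cache probed in GenerateGeneric above is indexed by hashing 32 bits of the receiver's map pointer against the string's hash field. A sketch of that index computation; the shift and capacity constants are illustrative stand-ins for KeyedLookupCache::kMapHashShift, String::kHashShift, and the cache's capacity mask:

    #include <cstdint>

    const uint32_t kMapHashShift = 2;     // assumed value
    const uint32_t kStringHashShift = 2;  // assumed value
    const uint32_t kCapacityMask = 63;    // capacity - 1, power of two

    uint32_t KeyedLookupCacheIndex(uintptr_t map_pointer,
                                   uint32_t hash_field) {
      uint32_t map_hash =
          static_cast<uint32_t>(map_pointer) >> kMapHashShift;
      uint32_t string_hash = hash_field >> kStringHashShift;
      return (map_hash ^ string_hash) & kCapacityMask;
    }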
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : key
- // -- esp[8] : receiver
// -----------------------------------
Label miss, index_ok;
// Pop return address.
// Performing the load early is better in the common case.
- __ pop(eax);
+ __ pop(ebx);
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
- __ test(ebx, Immediate(kSmiTagMask));
+ __ test(edx, Immediate(kSmiTagMask));
__ j(zero, &miss);
- __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ test(ecx, Immediate(kIsNotStringMask));
__ j(not_zero, &miss);
// Check if key is a smi or a heap number.
- __ mov(edx, Operand(esp, 0));
- __ test(edx, Immediate(kSmiTagMask));
+ __ test(eax, Immediate(kSmiTagMask));
__ j(zero, &index_ok);
- __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ cmp(ecx, Factory::heap_number_map());
__ j(not_equal, &miss);
__ bind(&index_ok);
- // Duplicate receiver and key since they are expected on the stack after
- // the KeyedLoadIC call.
- __ push(ebx); // receiver
- __ push(edx); // key
- __ push(eax); // return address
+ // Push receiver and key on the stack, and make a tail call.
+ __ push(edx); // receiver
+ __ push(eax); // key
+ __ push(ebx); // return address
__ InvokeBuiltin(Builtins::STRING_CHAR_AT, JUMP_FUNCTION);
__ bind(&miss);
- __ push(eax);
+ __ push(ebx);
GenerateMiss(masm);
}
@@ -433,18 +426,14 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : key
- // -- esp[8] : receiver
// -----------------------------------
Label slow, failed_allocation;
- // Load name and receiver.
- __ mov(eax, Operand(esp, kPointerSize));
- __ mov(ecx, Operand(esp, 2 * kPointerSize));
-
// Check that the object isn't a smi.
- __ test(ecx, Immediate(kSmiTagMask));
+ __ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
// Check that the key is a smi.
@@ -452,59 +441,56 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
__ j(not_zero, &slow, not_taken);
// Get the map of the receiver.
- __ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks.
- __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow, not_taken);
- // Get the instance type from the map of the receiver.
- __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
- // Check that the object is a JS object.
- __ cmp(edx, JS_OBJECT_TYPE);
+ __ CmpInstanceType(ecx, JS_OBJECT_TYPE);
__ j(not_equal, &slow, not_taken);
// Check that the elements array is the appropriate type of
// ExternalArray.
- // eax: index (as a smi)
- // ecx: JSObject
- __ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
Handle<Map> map(Heap::MapForExternalArrayType(array_type));
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(map));
__ j(not_equal, &slow, not_taken);
+ // eax: key, known to be a smi.
+ // edx: receiver, known to be a JSObject.
+ // ebx: elements object, known to be an external array.
// Check that the index is in range.
- __ sar(eax, kSmiTagSize); // Untag the index.
- __ cmp(eax, FieldOperand(ecx, ExternalArray::kLengthOffset));
+ __ mov(ecx, eax);
+ __ SmiUntag(ecx); // Untag the index.
+ __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
- // eax: untagged index
- // ecx: elements array
- __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
- // ecx: base pointer of external storage
+ __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
+ // ebx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
- __ movsx_b(eax, Operand(ecx, eax, times_1, 0));
+ __ movsx_b(ecx, Operand(ebx, ecx, times_1, 0));
break;
case kExternalUnsignedByteArray:
- __ movzx_b(eax, Operand(ecx, eax, times_1, 0));
+ __ movzx_b(ecx, Operand(ebx, ecx, times_1, 0));
break;
case kExternalShortArray:
- __ movsx_w(eax, Operand(ecx, eax, times_2, 0));
+ __ movsx_w(ecx, Operand(ebx, ecx, times_2, 0));
break;
case kExternalUnsignedShortArray:
- __ movzx_w(eax, Operand(ecx, eax, times_2, 0));
+ __ movzx_w(ecx, Operand(ebx, ecx, times_2, 0));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
- __ mov(eax, Operand(ecx, eax, times_4, 0));
+ __ mov(ecx, Operand(ebx, ecx, times_4, 0));
break;
case kExternalFloatArray:
- __ fld_s(Operand(ecx, eax, times_4, 0));
+ __ fld_s(Operand(ebx, ecx, times_4, 0));
break;
default:
UNREACHABLE();
@@ -512,7 +498,7 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
}
// For integer array types:
- // eax: value
+ // ecx: value
// For floating-point array type:
// FP(0): value
@@ -523,21 +509,19 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// it to a HeapNumber.
Label box_int;
if (array_type == kExternalIntArray) {
- // See Smi::IsValid for why this works.
- __ mov(ebx, eax);
- __ add(Operand(ebx), Immediate(0x40000000));
- __ cmp(ebx, 0x80000000);
- __ j(above_equal, &box_int);
+ __ cmp(ecx, 0xC0000000);
+ __ j(sign, &box_int);
} else {
ASSERT_EQ(array_type, kExternalUnsignedIntArray);
// The test is different for unsigned int values. Since we need
- // the Smi-encoded result to be treated as unsigned, we can't
+ // the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
- __ test(eax, Immediate(0xC0000000));
+ __ test(ecx, Immediate(0xC0000000));
__ j(not_zero, &box_int);
}
- __ shl(eax, kSmiTagSize);
+ __ mov(eax, ecx);
+ __ SmiTag(eax);
__ ret(0);
__ bind(&box_int);
@@ -545,34 +529,37 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
if (array_type == kExternalIntArray) {
- __ push(eax);
+ __ push(ecx);
__ fild_s(Operand(esp, 0));
- __ pop(eax);
+ __ pop(ecx);
} else {
ASSERT(array_type == kExternalUnsignedIntArray);
// Need to zero-extend the value.
// There's no fild variant for unsigned values, so zero-extend
// to a 64-bit int manually.
__ push(Immediate(0));
- __ push(eax);
+ __ push(ecx);
__ fild_d(Operand(esp, 0));
- __ pop(eax);
- __ pop(eax);
+ __ pop(ecx);
+ __ pop(ecx);
}
// FP(0): value
- __ AllocateHeapNumber(eax, ebx, ecx, &failed_allocation);
+ __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
// Set the value.
+ __ mov(eax, ecx);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
- __ AllocateHeapNumber(eax, ebx, ecx, &failed_allocation);
+ __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
// Set the value.
+ __ mov(eax, ecx);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
} else {
- __ shl(eax, kSmiTagSize);
+ __ mov(eax, ecx);
+ __ SmiTag(eax);
__ ret(0);
}
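The box-or-tag decision above hinges on the 32-bit smi range: a smi stores a signed 31-bit payload, so a signed int32 fits only within [-2^30, 2^30 - 1], and an unsigned value fits only if its top two bits are clear, which is exactly what the 0xC0000000 tests check. A sketch of both predicates:

    #include <cstdint>

    const int32_t kSmiMinValue = -(1 << 30);
    const int32_t kSmiMaxValue = (1 << 30) - 1;

    // Signed load (kExternalIntArray): box as a HeapNumber out of range.
    bool Int32FitsInSmi(int32_t value) {
      return value >= kSmiMinValue && value <= kSmiMaxValue;
    }

    // Unsigned load (kExternalUnsignedIntArray): either of the top two
    // bits set pushes the value outside the positive smi range.
    bool Uint32FitsInSmi(uint32_t value) {
      return (value & 0xC0000000u) == 0;
    }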
@@ -583,10 +570,51 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
__ fincstp();
// Fall through to slow case.
- // Slow case: Load name and receiver from stack and jump to runtime.
+ // Slow case: jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
- Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+ GenerateRuntimeGetProperty(masm);
+}
+
+
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow;
+
+ // Check that the receiver isn't a smi.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+
+ // Check that the key is a smi.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow, not_taken);
+
+ // Get the map of the receiver.
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+
+ // Check that it has an indexed interceptor and that access checks
+ // are not enabled for this object.
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
+ __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
+ __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
+ __ j(not_zero, &slow, not_taken);
+
+ // Everything is fine, call runtime.
+ __ pop(ecx);
+ __ push(edx); // receiver
+ __ push(eax); // key
+ __ push(ecx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(ExternalReference(
+ IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
+
+ __ bind(&slow);
+ GenerateMiss(masm);
}
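The interceptor fast path relies on a single masked compare: after masking the map's bit field down to the slow-case bits, the result must be exactly the indexed-interceptor bit, meaning the interceptor is present and no access checks or other slow-case conditions apply. A sketch with illustrative bit positions (Map's real layout differs):

    #include <cstdint>

    const uint32_t kHasIndexedInterceptor = 1u << 4;  // assumed position
    const uint32_t kIsAccessCheckNeeded   = 1u << 5;  // assumed position
    const uint32_t kSlowCaseBitFieldMask =
        kHasIndexedInterceptor | kIsAccessCheckNeeded;

    bool OnlyIndexedInterceptor(uint32_t map_bit_field) {
      return (map_bit_field & kSlowCaseBitFieldMask) ==
             kHasIndexedInterceptor;
    }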
@@ -645,7 +673,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Slow case: call runtime.
__ bind(&slow);
- Generate(masm, ExternalReference(Runtime::kSetProperty));
+ GenerateRuntimeSetProperty(masm);
// Check whether the elements is a pixel array.
// eax: value
@@ -918,7 +946,7 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// Slow case: call runtime.
__ bind(&slow);
- Generate(masm, ExternalReference(Runtime::kSetProperty));
+ GenerateRuntimeSetProperty(masm);
}
@@ -1001,7 +1029,7 @@ static void GenerateNormalHelper(MacroAssembler* masm,
// Search dictionary - put result in register edi.
__ mov(edi, edx);
- GenerateDictionaryLoad(masm, miss, eax, edi, ebx, ecx, CHECK_DICTIONARY);
+ GenerateDictionaryLoad(masm, miss, edx, ecx, eax, edi, ebx, CHECK_DICTIONARY);
// Check that the result is not a smi.
__ test(edi, Immediate(kSmiTagMask));
@@ -1150,13 +1178,11 @@ Object* LoadIC_Miss(Arguments args);
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
- __ mov(eax, Operand(esp, kPointerSize));
-
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
@@ -1164,20 +1190,18 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
StubCache::GenerateProbe(masm, flags, eax, ecx, ebx, edx);
// Cache miss: Jump to runtime.
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+ GenerateMiss(masm);
}
void LoadIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
Label miss, probe, global;
- __ mov(eax, Operand(esp, kPointerSize));
-
// Check that the receiver isn't a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
@@ -1202,8 +1226,16 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Search the dictionary placing the result in eax.
__ bind(&probe);
- GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx, CHECK_DICTIONARY);
- GenerateCheckNonObjectOrLoaded(masm, &miss, eax, edx);
+ GenerateDictionaryLoad(masm,
+ &miss,
+ eax,
+ ecx,
+ edx,
+ edi,
+ ebx,
+ CHECK_DICTIONARY);
+ GenerateCheckNonObjectOrLoaded(masm, &miss, edi, edx);
+ __ mov(eax, edi);
__ ret(0);
// Global object access: Check access rights.
@@ -1213,37 +1245,24 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Cache miss: Restore receiver from stack and jump to runtime.
__ bind(&miss);
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+ GenerateMiss(masm);
}
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
// -- esp[0] : return address
- // -- esp[4] : receiver
- // -----------------------------------
-
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
-}
-
-
-void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
- __ mov(eax, Operand(esp, kPointerSize));
__ pop(ebx);
__ push(eax); // receiver
__ push(ecx); // name
__ push(ebx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(f, 2, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kLoadIC_Miss)), 2, 1);
}
@@ -1347,31 +1366,35 @@ Object* KeyedLoadIC_Miss(Arguments args);
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : name
- // -- esp[8] : receiver
// -----------------------------------
- Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+ __ pop(ebx);
+ __ push(edx); // receiver
+ __ push(eax); // name
+ __ push(ebx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedLoadIC_Miss)), 2, 1);
}
-void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : name
- // -- esp[8] : receiver
// -----------------------------------
- __ mov(eax, Operand(esp, kPointerSize));
- __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ pop(ebx);
- __ push(ecx); // receiver
+ __ push(edx); // receiver
__ push(eax); // name
__ push(ebx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(f, 2, 1);
+ __ TailCallRuntime(ExternalReference(Runtime::kKeyedGetProperty), 2, 1);
}
@@ -1393,49 +1416,80 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
}
-void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
- // -- ecx : transition map
+ // -- ecx : name
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
__ pop(ebx);
- __ push(edx); // receiver
- __ push(ecx); // transition map
- __ push(eax); // value
- __ push(ebx); // return address
+ __ push(edx);
+ __ push(ecx);
+ __ push(eax);
+ __ push(ebx);
// Perform tail call to the entry.
- __ TailCallRuntime(
- ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
}
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
+void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
+ //
+ // This accepts as a receiver anything JSObject::SetElementsLength accepts
+ // (currently anything except external and pixel arrays, which means
+ // anything with elements of FixedArray type), but is currently restricted
+ // to JSArray.
+ // The value must be a number; only smis are accepted, as they are the most
+ // common case.
- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(ebx);
+ Label miss;
- // Perform tail call to the entry.
- __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
+ Register receiver = edx;
+ Register value = eax;
+ Register scratch = ebx;
+
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, &miss, not_taken);
+
+ // Check that the object is a JS array.
+ __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+ __ j(not_equal, &miss, not_taken);
+
+ // Check that elements are FixedArray.
+ __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
+ __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
+ __ j(not_equal, &miss, not_taken);
+
+ // Check that value is a smi.
+ __ test(value, Immediate(kSmiTagMask));
+ __ j(not_zero, &miss, not_taken);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ pop(scratch);
+ __ push(receiver);
+ __ push(value);
+ __ push(scratch); // return address
+
+ __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_ArrayLength)), 2, 1);
+
+ __ bind(&miss);
+
+ GenerateMiss(masm);
}
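
The guards above rely on V8's smi tagging: on ia32 a small integer keeps its payload in the upper 31 bits and a zero tag bit at the bottom, so test(reg, Immediate(kSmiTagMask)) sets the zero flag exactly for smis, and the j(not_zero, &miss) above rejects anything that is not one. A self-contained sketch of the scheme; the constants match this port but are restated here as assumptions:

#include <cassert>
#include <cstdint>

const intptr_t kSmiTagMask = 1;  // low bit 0 => smi, 1 => heap object

bool IsSmi(intptr_t value) { return (value & kSmiTagMask) == 0; }
intptr_t SmiTag(intptr_t untagged) { return untagged << 1; }
intptr_t SmiUntag(intptr_t tagged) { assert(IsSmi(tagged)); return tagged >> 1; }

int main() {
  assert(IsSmi(SmiTag(42)));
  assert(SmiUntag(SmiTag(42)) == 42);
  return 0;
}
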
// Defined in ic.cc.
Object* KeyedStoreIC_Miss(Arguments args);
-void KeyedStoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- esp[0] : return address
@@ -1450,28 +1504,26 @@ void KeyedStoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
__ push(ecx);
// Do tail-call to runtime routine.
- __ TailCallRuntime(f, 3, 1);
+ __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
}
-void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
- // -- ecx : transition map
// -- esp[0] : return address
// -- esp[4] : key
// -- esp[8] : receiver
// -----------------------------------
- __ pop(ebx);
+ __ pop(ecx);
+ __ push(Operand(esp, 1 * kPointerSize));
__ push(Operand(esp, 1 * kPointerSize));
- __ push(ecx);
__ push(eax);
- __ push(ebx);
+ __ push(ecx);
// Do tail-call to runtime routine.
- __ TailCallRuntime(
- ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedStoreIC_Miss)), 3, 1);
}
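
The two consecutive push(Operand(esp, 1 * kPointerSize)) instructions are easy to misread: each push moves esp, so the pair copies first the receiver and then the key, leaving (receiver, key, value) under the saved return address for the miss handler. A plain C++ model of the shuffle (illustrative only; the vector's back is the stack top):

#include <cstdint>
#include <vector>

void KeyedStoreMissShuffle(std::vector<uintptr_t>* s, uintptr_t value_in_eax) {
  uintptr_t ret = s->back();                 // __ pop(ecx)
  s->pop_back();
  uintptr_t receiver = (*s)[s->size() - 2];  // esp[4] before the push
  s->push_back(receiver);
  uintptr_t key = (*s)[s->size() - 2];       // the stack shifted one slot
  s->push_back(key);
  s->push_back(value_in_eax);                // __ push(eax)
  s->push_back(ret);                         // __ push(ecx)
}
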
#undef __
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 19a380b02..5ae3fe205 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -41,7 +41,6 @@ namespace internal {
MacroAssembler::MacroAssembler(void* buffer, int size)
: Assembler(buffer, size),
- unresolved_(0),
generating_stub_(false),
allow_stub_calls_(true),
code_object_(Heap::undefined_value()) {
@@ -308,6 +307,13 @@ void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
}
}
}
+
+void MacroAssembler::DebugBreak() {
+ Set(eax, Immediate(0));
+ mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak)));
+ CEntryStub ces(1);
+ call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
#endif
void MacroAssembler::Set(Register dst, const Immediate& x) {
@@ -338,6 +344,19 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
}
+void MacroAssembler::CheckMap(Register obj,
+ Handle<Map> map,
+ Label* fail,
+ bool is_heap_object) {
+ if (!is_heap_object) {
+ test(obj, Immediate(kSmiTagMask));
+ j(zero, fail);
+ }
+ cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
+ j(not_equal, fail);
+}
+
+
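
Restated as ordinary C++, the new helper's contract is roughly the sketch below, where returning false stands in for the jump to fail; the pointer untagging by kHeapObjectTag is an assumption made explicit here rather than something the patch spells out:

#include <cstdint>

struct Map {};
struct HeapObject { Map* map; };  // the map is the first field

const uintptr_t kSmiTagMask = 1;
const uintptr_t kHeapObjectTag = 1;

bool CheckMap(uintptr_t obj, Map* expected_map, bool is_heap_object) {
  if (!is_heap_object && (obj & kSmiTagMask) == 0) return false;  // a smi
  HeapObject* heap_obj = reinterpret_cast<HeapObject*>(obj - kHeapObjectTag);
  return heap_obj->map == expected_map;  // cmp(FieldOperand(...), map)
}
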
Condition MacroAssembler::IsObjectStringType(Register heap_object,
Register map,
Register instance_type) {
@@ -364,6 +383,17 @@ void MacroAssembler::FCmp() {
}
+void MacroAssembler::AbortIfNotNumber(Register object, const char* msg) {
+ Label ok;
+ test(object, Immediate(kSmiTagMask));
+ j(zero, &ok);
+ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ Assert(equal, msg);
+ bind(&ok);
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
mov(ebp, Operand(esp));
@@ -396,12 +426,8 @@ void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) {
// Reserve room for entry stack pointer and push the debug marker.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
- push(Immediate(0)); // saved entry sp, patched before call
- if (mode == ExitFrame::MODE_DEBUG) {
- push(Immediate(0));
- } else {
- push(Immediate(CodeObject()));
- }
+ push(Immediate(0)); // Saved entry sp, patched before call.
+ push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
@@ -538,6 +564,7 @@ void MacroAssembler::PopTryHandler() {
Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
JSObject* holder, Register holder_reg,
Register scratch,
+ int save_at_depth,
Label* miss) {
// Make sure there's no overlap between scratch and the other
// registers.
@@ -545,7 +572,11 @@ Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
// Keep track of the current object in register reg.
Register reg = object_reg;
- int depth = 1;
+ int depth = 0;
+
+ if (save_at_depth == depth) {
+ mov(Operand(esp, kPointerSize), object_reg);
+ }
// Check the maps in the prototype chain.
// Traverse the prototype chain from the object and do map checks.
@@ -577,7 +608,6 @@ Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
// to it in the code. Load it from the map.
reg = holder_reg; // from now the object is in holder_reg
mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
-
} else {
// Check the map of the current object.
cmp(FieldOperand(reg, HeapObject::kMapOffset),
@@ -595,6 +625,10 @@ Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
mov(reg, Handle<JSObject>(prototype));
}
+ if (save_at_depth == depth) {
+ mov(Operand(esp, kPointerSize), reg);
+ }
+
// Go to the next object in the prototype chain.
object = prototype;
}
@@ -605,7 +639,7 @@ Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
j(not_equal, miss, not_taken);
// Log the check depth.
- LOG(IntEvent("check-maps-depth", depth));
+ LOG(IntEvent("check-maps-depth", depth + 1));
// Perform security check for access to the global object and return
// the holder register.
@@ -1122,6 +1156,16 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
}
+void MacroAssembler::CallExternalReference(ExternalReference ref,
+ int num_arguments) {
+ mov(eax, Immediate(num_arguments));
+ mov(ebx, Immediate(ref));
+
+ CEntryStub stub(1);
+ CallStub(&stub);
+}
+
+
Object* MacroAssembler::TryCallRuntime(Runtime::Function* f,
int num_arguments) {
if (f->nargs >= 0 && f->nargs != num_arguments) {
@@ -1342,10 +1386,22 @@ void MacroAssembler::InvokeFunction(Register fun,
}
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
- bool resolved;
- Handle<Code> code = ResolveBuiltin(id, &resolved);
+void MacroAssembler::InvokeFunction(JSFunction* function,
+ const ParameterCount& actual,
+ InvokeFlag flag) {
+ ASSERT(function->is_compiled());
+ // Get the function and setup the context.
+ mov(edi, Immediate(Handle<JSFunction>(function)));
+ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ // Invoke the cached code.
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
// Calls are not allowed in some stubs.
ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
@@ -1353,55 +1409,22 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
// arguments match the expected number of arguments. Fake a
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
- InvokeCode(Handle<Code>(code), expected, expected,
- RelocInfo::CODE_TARGET, flag);
-
- const char* name = Builtins::GetName(id);
- int argc = Builtins::GetArgumentsCount(id);
-
- if (!resolved) {
- uint32_t flags =
- Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsUseCodeObject::encode(false);
- Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
- unresolved_.Add(entry);
- }
+ GetBuiltinEntry(edx, id);
+ InvokeCode(Operand(edx), expected, expected, flag);
}
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- bool resolved;
- Handle<Code> code = ResolveBuiltin(id, &resolved);
-
- const char* name = Builtins::GetName(id);
- int argc = Builtins::GetArgumentsCount(id);
-
- mov(Operand(target), Immediate(code));
- if (!resolved) {
- uint32_t flags =
- Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsUseCodeObject::encode(true);
- Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
- unresolved_.Add(entry);
- }
- add(Operand(target), Immediate(Code::kHeaderSize - kHeapObjectTag));
-}
-
-
-Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
- bool* resolved) {
- // Move the builtin function into the temporary function slot by
- // reading it from the builtins object. NOTE: We should be able to
- // reduce this to two instructions by putting the function table in
- // the global object instead of the "builtins" object and by using a
- // real register for the function.
- mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- mov(edx, FieldOperand(edx, GlobalObject::kBuiltinsOffset));
+ // Load the JavaScript builtin function from the builtins object.
+ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ mov(edi, FieldOperand(edi, GlobalObject::kBuiltinsOffset));
int builtins_offset =
JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
- mov(edi, FieldOperand(edx, builtins_offset));
-
- return Builtins::GetCode(id, resolved);
+ mov(edi, FieldOperand(edi, builtins_offset));
+ // Load the code entry point from the function into the target register.
+ mov(target, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ mov(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
+ add(Operand(target), Immediate(Code::kHeaderSize - kHeapObjectTag));
}
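
The point of this rewrite is that builtins are now resolved entirely at runtime, which lets the unresolved_ fixup list die (see the header changes below). The emitted sequence is three dependent loads plus a header-size adjustment; a hypothetical layout that mirrors it, with field names echoing the offsets used above:

struct Code { unsigned char* entry; };  // code + kHeaderSize - kHeapObjectTag
struct SharedFunctionInfo { Code* code; };
struct JSFunction { SharedFunctionInfo* shared; };
struct Builtins { JSFunction* js_builtin[32]; };
struct GlobalObject { Builtins* builtins; };
struct Context { GlobalObject* global; };

unsigned char* GetBuiltinEntry(Context* esi, int id) {
  JSFunction* edi = esi->global->builtins->js_builtin[id];
  return edi->shared->code->entry;
}
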
@@ -1546,6 +1569,20 @@ void MacroAssembler::Abort(const char* msg) {
}
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
+ Register instance_type,
+ Register scratch,
+ Label *failure) {
+ if (!scratch.is(instance_type)) {
+ mov(scratch, instance_type);
+ }
+ and_(scratch,
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
+ cmp(scratch, kStringTag | kSeqStringTag | kAsciiStringTag);
+ j(not_equal, failure);
+}
+
+
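
The single-instance-type variant applies the same mask-and-compare trick as the two-string helper below it. A self-contained restatement; the bit values are assumed from this era of the instance-type encoding, not quoted from the headers:

#include <cassert>

const int kIsNotStringMask          = 0x80;
const int kStringRepresentationMask = 0x03;
const int kStringEncodingMask       = 0x04;
const int kStringTag = 0x00, kSeqStringTag = 0x00, kAsciiStringTag = 0x04;

bool IsSequentialAscii(int instance_type) {
  int bits = instance_type & (kIsNotStringMask | kStringRepresentationMask |
                              kStringEncodingMask);
  return bits == (kStringTag | kSeqStringTag | kAsciiStringTag);
}

int main() {
  assert(IsSequentialAscii(0x04));   // sequential ASCII string
  assert(!IsSequentialAscii(0x80));  // not a string at all
  return 0;
}
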
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
Register object2,
Register scratch1,
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index cc245602d..69dc54ca8 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -69,6 +69,7 @@ class MacroAssembler: public Assembler {
void CopyRegistersFromStackToMemory(Register base,
Register scratch,
RegList regs);
+ void DebugBreak();
#endif
// ---------------------------------------------------------------------------
@@ -123,6 +124,10 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual,
InvokeFlag flag);
+ void InvokeFunction(JSFunction* function,
+ const ParameterCount& actual,
+ InvokeFlag flag);
+
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
@@ -141,6 +146,14 @@ class MacroAssembler: public Assembler {
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
+ // Check if the map of an object is equal to a specified map and
+ // branch to the label if not. Skip the smi check if it is not
+ // required (i.e. the object is known to be a heap object).
+ void CheckMap(Register obj,
+ Handle<Map> map,
+ Label* fail,
+ bool is_heap_object);
+
// Check if the object in register heap_object is a string. Afterwards the
// register map contains the object map and the register instance_type
// contains the instance_type. The registers map and instance_type can be the
@@ -163,6 +176,9 @@ class MacroAssembler: public Assembler {
sar(reg, kSmiTagSize);
}
+ // Abort execution if argument is not a number. Used in debug code.
+ void AbortIfNotNumber(Register object, const char* msg);
+
// ---------------------------------------------------------------------------
// Exception handling
@@ -185,9 +201,14 @@ class MacroAssembler: public Assembler {
// clobbered if it is the same as the holder register. The function
// returns a register containing the holder - either object_reg or
// holder_reg.
+ // The function can optionally (when save_at_depth !=
+ // kInvalidProtoDepth) save the object at the given depth by moving
+ // it to [esp + kPointerSize].
Register CheckMaps(JSObject* object, Register object_reg,
JSObject* holder, Register holder_reg,
- Register scratch, Label* miss);
+ Register scratch,
+ int save_at_depth,
+ Label* miss);
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
@@ -339,6 +360,9 @@ class MacroAssembler: public Assembler {
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId id, int num_arguments);
+ // Convenience function: call an external reference.
+ void CallExternalReference(ExternalReference ref, int num_arguments);
+
// Convenience function: Same as above, but takes the fid instead.
Object* TryCallRuntime(Runtime::FunctionId id, int num_arguments);
@@ -376,13 +400,6 @@ class MacroAssembler: public Assembler {
void Move(Register target, Handle<Object> value);
- struct Unresolved {
- int pc;
- uint32_t flags; // see Bootstrapper::FixupFlags decoders/encoders.
- const char* name;
- };
- List<Unresolved>* unresolved() { return &unresolved_; }
-
Handle<Object> CodeObject() { return code_object_; }
@@ -418,6 +435,13 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String utilities.
+ // Check whether the instance type represents a flat ASCII string. Jump to
+ // the label if not. If the instance type can be scratched, specify the same
+ // register for both instance type and scratch.
+ void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
+ Register scratch,
+ Label *on_not_flat_ascii_string);
+
// Checks if both objects are sequential ASCII strings, and jumps to label
// if either is not.
void JumpIfNotBothSequentialAsciiStrings(Register object1,
@@ -427,11 +451,10 @@ class MacroAssembler: public Assembler {
Label *on_not_flat_ascii_strings);
private:
- List<Unresolved> unresolved_;
bool generating_stub_;
bool allow_stub_calls_;
- Handle<Object> code_object_; // This handle will be patched with the
- // code object on installation.
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
@@ -441,18 +464,6 @@ class MacroAssembler: public Assembler {
Label* done,
InvokeFlag flag);
- // Prepares for a call or jump to a builtin by doing two things:
- // 1. Emits code that fetches the builtin's function object from the context
- // at runtime, and puts it in the register rdi.
- // 2. Fetches the builtin's code object, and returns it in a handle, at
- // compile time, so that later code can emit instructions to jump or call
- // the builtin directly. If the code object has not yet been created, it
- // returns the builtin code object for IllegalFunction, and sets the
- // output parameter "resolved" to false. Code that uses the return value
- // should then add the address and the builtin name to the list of fixups
- // called unresolved_, which is fixed up by the bootstrapper.
- Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
-
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 7acf81c94..5729d9d74 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -152,22 +152,6 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
}
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- JSObject* holder_obj) {
- __ push(receiver);
- __ push(holder);
- __ push(name);
- InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!Heap::InNewSpace(interceptor));
- __ mov(receiver, Immediate(Handle<Object>(interceptor)));
- __ push(receiver);
- __ push(FieldOperand(receiver, InterceptorInfo::kDataOffset));
-}
-
-
void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
@@ -226,30 +210,32 @@ static void GenerateStringCheck(MacroAssembler* masm,
void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
Label* miss) {
- Label load_length, check_wrapper;
+ Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch register.
- GenerateStringCheck(masm, receiver, scratch, miss, &check_wrapper);
+ GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
// Load length from the string and convert to a smi.
- __ bind(&load_length);
__ mov(eax, FieldOperand(receiver, String::kLengthOffset));
__ SmiTag(eax);
__ ret(0);
// Check if the object is a JSValue wrapper.
__ bind(&check_wrapper);
- __ cmp(scratch, JS_VALUE_TYPE);
+ __ cmp(scratch1, JS_VALUE_TYPE);
__ j(not_equal, miss, not_taken);
// Check if the wrapped value is a string and load the length
// directly if it is.
- __ mov(receiver, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, receiver, scratch, miss, miss);
- __ jmp(&load_length);
+ __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+ __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
+ __ SmiTag(eax);
+ __ ret(0);
}
@@ -285,20 +271,31 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
}
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ JSObject* holder_obj) {
+ __ push(receiver);
+ __ push(holder);
+ __ push(name);
+ InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+ ASSERT(!Heap::InNewSpace(interceptor));
+ __ mov(receiver, Immediate(Handle<Object>(interceptor)));
+ __ push(receiver);
+ __ push(FieldOperand(receiver, InterceptorInfo::kDataOffset));
+}
+
+
static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
Register name,
JSObject* holder_obj) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
- __ mov(eax, Immediate(5));
- __ mov(ebx, Immediate(ref));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
+ __ CallExternalReference(
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly)),
+ 5);
}
@@ -326,7 +323,7 @@ static void CompileLoadInterceptor(Compiler* compiler,
stub_compiler->CheckPrototypes(object, receiver, holder,
scratch1, scratch2, name, miss);
- if (lookup->IsValid() && lookup->IsCacheable()) {
+ if (lookup->IsProperty() && lookup->IsCacheable()) {
compiler->CompileCacheable(masm,
stub_compiler,
receiver,
@@ -362,7 +359,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
LookupResult* lookup,
String* name,
Label* miss_label) {
- AccessorInfo* callback = 0;
+ AccessorInfo* callback = NULL;
bool optimize = false;
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
@@ -479,88 +476,337 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
};
+// Holds information about possible function call optimizations.
+class CallOptimization BASE_EMBEDDED {
+ public:
+ explicit CallOptimization(LookupResult* lookup)
+ : constant_function_(NULL),
+ is_simple_api_call_(false),
+ expected_receiver_type_(NULL),
+ api_call_info_(NULL) {
+ if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
+
+ // We only optimize constant function calls.
+ if (lookup->type() != CONSTANT_FUNCTION) return;
+
+ Initialize(lookup->GetConstantFunction());
+ }
+
+ explicit CallOptimization(JSFunction* function) {
+ Initialize(function);
+ }
+
+ bool is_constant_call() const {
+ return constant_function_ != NULL;
+ }
+
+ JSFunction* constant_function() const {
+ ASSERT(constant_function_ != NULL);
+ return constant_function_;
+ }
+
+ bool is_simple_api_call() const {
+ return is_simple_api_call_;
+ }
+
+ FunctionTemplateInfo* expected_receiver_type() const {
+ ASSERT(is_simple_api_call_);
+ return expected_receiver_type_;
+ }
+
+ CallHandlerInfo* api_call_info() const {
+ ASSERT(is_simple_api_call_);
+ return api_call_info_;
+ }
+
+ // Returns the depth of the object having the expected type in the
+ // prototype chain between the two arguments.
+ int GetPrototypeDepthOfExpectedType(JSObject* object,
+ JSObject* holder) const {
+ ASSERT(is_simple_api_call_);
+ if (expected_receiver_type_ == NULL) return 0;
+ int depth = 0;
+ while (object != holder) {
+ if (object->IsInstanceOf(expected_receiver_type_)) return depth;
+ object = JSObject::cast(object->GetPrototype());
+ ++depth;
+ }
+ if (holder->IsInstanceOf(expected_receiver_type_)) return depth;
+ return kInvalidProtoDepth;
+ }
+
+ private:
+ void Initialize(JSFunction* function) {
+ if (!function->is_compiled()) return;
+
+ constant_function_ = function;
+ is_simple_api_call_ = false;
+
+ AnalyzePossibleApiFunction(function);
+ }
+
+ // Determines whether the given function can be called using the
+ // fast api call builtin.
+ void AnalyzePossibleApiFunction(JSFunction* function) {
+ SharedFunctionInfo* sfi = function->shared();
+ if (sfi->function_data()->IsUndefined()) return;
+ FunctionTemplateInfo* info =
+ FunctionTemplateInfo::cast(sfi->function_data());
+
+ // Require a C++ callback.
+ if (info->call_code()->IsUndefined()) return;
+ api_call_info_ = CallHandlerInfo::cast(info->call_code());
+
+ // Accept signatures that either have no restrictions at all or
+ // only have restrictions on the receiver.
+ if (!info->signature()->IsUndefined()) {
+ SignatureInfo* signature = SignatureInfo::cast(info->signature());
+ if (!signature->args()->IsUndefined()) return;
+ if (!signature->receiver()->IsUndefined()) {
+ expected_receiver_type_ =
+ FunctionTemplateInfo::cast(signature->receiver());
+ }
+ }
+
+ is_simple_api_call_ = true;
+ }
+
+ JSFunction* constant_function_;
+ bool is_simple_api_call_;
+ FunctionTemplateInfo* expected_receiver_type_;
+ CallHandlerInfo* api_call_info_;
+};
+
+
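
GetPrototypeDepthOfExpectedType is the piece that decides whether the fast path applies at all: the receiver must reach an object of the expected type within the chain walked to the holder, otherwise the depth is invalid and the stub falls back to the regular call. A toy model of the walk (not V8 types):

const int kInvalidProtoDepth = -1;

struct Obj { bool has_expected_type; Obj* prototype; };

int DepthOfExpectedType(Obj* object, Obj* holder) {
  int depth = 0;
  while (object != holder) {
    if (object->has_expected_type) return depth;
    object = object->prototype;
    ++depth;
  }
  return holder->has_expected_type ? depth : kInvalidProtoDepth;
}
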
+// Reserves space for the extra arguments to FastHandleApiCall in the
+// caller's frame.
+//
+// These arguments are set by CheckPrototypes and GenerateFastApiCall.
+static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : last argument in the internal frame of the caller
+ // -----------------------------------
+ __ pop(scratch);
+ __ push(Immediate(Smi::FromInt(0)));
+ __ push(Immediate(Smi::FromInt(0)));
+ __ push(Immediate(Smi::FromInt(0)));
+ __ push(Immediate(Smi::FromInt(0)));
+ __ push(scratch);
+}
+
+
+// Undoes the effects of ReserveSpaceForFastApiCall.
+static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : last fast api call extra argument
+ // -- ...
+ // -- esp[16] : first fast api call extra argument
+ // -- esp[20] : last argument in the internal frame
+ // -----------------------------------
+ __ pop(scratch);
+ __ add(Operand(esp), Immediate(kPointerSize * 4));
+ __ push(scratch);
+}
+
+
+// Generates call to FastHandleApiCall builtin.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : object passing the type check
+ // (last fast api call extra argument,
+ // set by CheckPrototypes)
+ // -- esp[8] : api call data
+ // -- esp[12] : api callback
+ // -- esp[16] : api function
+ // (first fast api call extra argument)
+ // -- esp[20] : last argument
+ // -- ...
+ // -- esp[(argc + 5) * 4] : first argument
+ // -- esp[(argc + 6) * 4] : receiver
+ // -----------------------------------
+
+ // Get the function and setup the context.
+ JSFunction* function = optimization.constant_function();
+ __ mov(edi, Immediate(Handle<JSFunction>(function)));
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // Pass the additional arguments FastHandleApiCall expects.
+ __ mov(Operand(esp, 4 * kPointerSize), edi);
+ bool info_loaded = false;
+ Object* callback = optimization.api_call_info()->callback();
+ if (Heap::InNewSpace(callback)) {
+ info_loaded = true;
+ __ mov(ecx, Handle<CallHandlerInfo>(optimization.api_call_info()));
+ __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kCallbackOffset));
+ __ mov(Operand(esp, 3 * kPointerSize), ebx);
+ } else {
+ __ mov(Operand(esp, 3 * kPointerSize), Immediate(Handle<Object>(callback)));
+ }
+ Object* call_data = optimization.api_call_info()->data();
+ if (Heap::InNewSpace(call_data)) {
+ if (!info_loaded) {
+ __ mov(ecx, Handle<CallHandlerInfo>(optimization.api_call_info()));
+ }
+ __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
+ __ mov(Operand(esp, 2 * kPointerSize), ebx);
+ } else {
+ __ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(Handle<Object>(call_data)));
+ }
+
+ // Set the number of arguments.
+ __ mov(eax, Immediate(argc + 4));
+
+ // Jump to the fast api call builtin (tail call).
+ Handle<Code> code = Handle<Code>(
+ Builtins::builtin(Builtins::FastHandleApiCall));
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+}
+
+
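
Taken together, the three helpers implement a small protocol: reserve four placeholder slots under the return address up front, let CheckPrototypes and GenerateFastApiCall fill them in on the fast path, and pop them again on a miss. A minimal stack model of reserve and free (illustrative only):

#include <cstdint>
#include <vector>

void Reserve(std::vector<uintptr_t>* stack) {
  uintptr_t ret = stack->back();
  stack->pop_back();
  for (int i = 0; i < 4; i++) stack->push_back(0);  // Smi::FromInt(0)
  stack->push_back(ret);
}

void Free(std::vector<uintptr_t>* stack) {
  uintptr_t ret = stack->back();
  stack->pop_back();
  stack->resize(stack->size() - 4);  // add(esp, 4 * kPointerSize)
  stack->push_back(ret);
}
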
class CallInterceptorCompiler BASE_EMBEDDED {
public:
- CallInterceptorCompiler(const ParameterCount& arguments, Register name)
- : arguments_(arguments), argc_(arguments.immediate()), name_(name) {}
+ CallInterceptorCompiler(StubCompiler* stub_compiler,
+ const ParameterCount& arguments,
+ Register name)
+ : stub_compiler_(stub_compiler),
+ arguments_(arguments),
+ name_(name) {}
+
+ void Compile(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ ASSERT(holder->HasNamedInterceptor());
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss, not_taken);
+
+ CallOptimization optimization(lookup);
+
+ if (optimization.is_constant_call() &&
+ !Top::CanHaveSpecialFunctions(holder)) {
+ CompileCacheable(masm,
+ object,
+ receiver,
+ scratch1,
+ scratch2,
+ holder,
+ lookup,
+ name,
+ optimization,
+ miss);
+ } else {
+ CompileRegular(masm,
+ object,
+ receiver,
+ scratch1,
+ scratch2,
+ name,
+ holder,
+ miss);
+ }
+ }
+ private:
void CompileCacheable(MacroAssembler* masm,
- StubCompiler* stub_compiler,
+ JSObject* object,
Register receiver,
- Register holder,
Register scratch1,
Register scratch2,
JSObject* holder_obj,
LookupResult* lookup,
String* name,
+ const CallOptimization& optimization,
Label* miss_label) {
- JSFunction* function = 0;
- bool optimize = false;
- // So far the most popular case for failed interceptor is
- // CONSTANT_FUNCTION sitting below.
- if (lookup->type() == CONSTANT_FUNCTION) {
- function = lookup->GetConstantFunction();
- // JSArray holder is a special case for call constant function
- // (see the corresponding code).
- if (function->is_compiled() && !holder_obj->IsJSArray()) {
- optimize = true;
+ ASSERT(optimization.is_constant_call());
+ ASSERT(!lookup->holder()->IsGlobalObject());
+
+ int depth1 = kInvalidProtoDepth;
+ int depth2 = kInvalidProtoDepth;
+ bool can_do_fast_api_call = false;
+ if (optimization.is_simple_api_call() &&
+ !lookup->holder()->IsGlobalObject()) {
+ depth1 = optimization.GetPrototypeDepthOfExpectedType(object, holder_obj);
+ if (depth1 == kInvalidProtoDepth) {
+ depth2 = optimization.GetPrototypeDepthOfExpectedType(holder_obj,
+ lookup->holder());
}
+ can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
+ (depth2 != kInvalidProtoDepth);
}
- if (!optimize) {
- CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
- return;
+ __ IncrementCounter(&Counters::call_const_interceptor, 1);
+
+ if (can_do_fast_api_call) {
+ __ IncrementCounter(&Counters::call_const_interceptor_fast_api, 1);
+ ReserveSpaceForFastApiCall(masm, scratch1);
}
- __ EnterInternalFrame();
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
+ Label miss_cleanup;
+ Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
+ Register holder =
+ stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+ scratch1, scratch2, name,
+ depth1, miss);
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
+ Label regular_invoke;
+ LoadWithInterceptor(masm, receiver, holder, holder_obj, &regular_invoke);
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- __ LeaveInternalFrame();
+ // Generate code for the failed interceptor case.
- __ cmp(eax, Factory::no_interceptor_result_sentinel());
- Label invoke;
- __ j(not_equal, &invoke);
-
- stub_compiler->CheckPrototypes(holder_obj, receiver,
- lookup->holder(), scratch1,
- scratch2,
- name,
- miss_label);
- if (lookup->holder()->IsGlobalObject()) {
- __ mov(edx, Operand(esp, (argc_ + 1) * kPointerSize));
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edx);
- }
+ // Check that the lookup is still valid.
+ stub_compiler_->CheckPrototypes(holder_obj, receiver,
+ lookup->holder(),
+ scratch1, scratch2, name,
+ depth2, miss);
- ASSERT(function->is_compiled());
- // Get the function and setup the context.
- __ mov(edi, Immediate(Handle<JSFunction>(function)));
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ if (can_do_fast_api_call) {
+ GenerateFastApiCall(masm, optimization, arguments_.immediate());
+ } else {
+ __ InvokeFunction(optimization.constant_function(), arguments_,
+ JUMP_FUNCTION);
+ }
- // Jump to the cached code (tail call).
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- __ InvokeCode(code, expected, arguments_,
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+ if (can_do_fast_api_call) {
+ __ bind(&miss_cleanup);
+ FreeSpaceForFastApiCall(masm, scratch1);
+ __ jmp(miss_label);
+ }
- __ bind(&invoke);
+ __ bind(&regular_invoke);
+ if (can_do_fast_api_call) {
+ FreeSpaceForFastApiCall(masm, scratch1);
+ }
}
void CompileRegular(MacroAssembler* masm,
+ JSObject* object,
Register receiver,
- Register holder,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
+ String* name,
JSObject* holder_obj,
Label* miss_label) {
+ Register holder =
+ stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+ scratch1, scratch2, name,
+ miss_label);
+
__ EnterInternalFrame();
// Save the name_ register across the call.
__ push(name_);
@@ -571,22 +817,41 @@ class CallInterceptorCompiler BASE_EMBEDDED {
name_,
holder_obj);
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
- __ mov(eax, Immediate(5));
- __ mov(ebx, Immediate(ref));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
+ __ CallExternalReference(
+ ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
+ 5);
// Restore the name_ register.
__ pop(name_);
__ LeaveInternalFrame();
}
- private:
+ void LoadWithInterceptor(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ JSObject* holder_obj,
+ Label* interceptor_succeeded) {
+ __ EnterInternalFrame();
+ __ push(holder); // Save the holder.
+ __ push(name_); // Save the name.
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
+ __ LeaveInternalFrame();
+
+ __ cmp(eax, Factory::no_interceptor_result_sentinel());
+ __ j(not_equal, interceptor_succeeded);
+ }
+
+ StubCompiler* stub_compiler_;
const ParameterCount& arguments_;
- int argc_;
Register name_;
};
@@ -605,8 +870,9 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
}
+// Both name_reg and receiver_reg are preserved on jumps to miss_label,
+// but may be destroyed if the store is successful.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Builtins::Name storage_extend,
JSObject* object,
int index,
Map* transition,
@@ -636,9 +902,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
- __ mov(ecx, Immediate(Handle<Map>(transition)));
- Handle<Code> ic(Builtins::builtin(storage_extend));
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ __ pop(scratch); // Return address.
+ __ push(receiver_reg);
+ __ push(Immediate(Handle<Map>(transition)));
+ __ push(eax);
+ __ push(scratch);
+ __ TailCallRuntime(
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)), 3, 1);
return;
}
@@ -691,10 +961,12 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
Register holder_reg,
Register scratch,
String* name,
+ int push_at_depth,
Label* miss) {
// Check that the maps haven't changed.
Register result =
- masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
+ masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch,
+ push_at_depth, miss);
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed.
@@ -716,7 +988,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
object = JSObject::cast(object->GetPrototype());
}
- // Return the register containin the holder.
+ // Return the register containing the holder.
return result;
}
@@ -887,7 +1159,7 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
}
-Object* CallStubCompiler::CompileCallField(Object* object,
+Object* CallStubCompiler::CompileCallField(JSObject* object,
JSObject* holder,
int index,
String* name) {
@@ -909,9 +1181,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
__ j(zero, &miss, not_taken);
// Do the right check and compute the holder register.
- Register reg =
- CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, eax, name, &miss);
+ Register reg = CheckPrototypes(object, edx, holder, ebx, eax, name, &miss);
GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
@@ -969,15 +1239,31 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// unless we're doing a receiver map check.
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+ CallOptimization optimization(function);
+ int depth = kInvalidProtoDepth;
+
switch (check) {
case RECEIVER_MAP_CHECK:
+ __ IncrementCounter(&Counters::call_const, 1);
+
+ if (optimization.is_simple_api_call() && !object->IsGlobalObject()) {
+ depth = optimization.GetPrototypeDepthOfExpectedType(
+ JSObject::cast(object), holder);
+ }
+
+ if (depth != kInvalidProtoDepth) {
+ __ IncrementCounter(&Counters::call_const_fast_api, 1);
+ ReserveSpaceForFastApiCall(masm(), eax);
+ }
+
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, eax, name, &miss);
+ ebx, eax, name, depth, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
if (object->IsGlobalObject()) {
+ ASSERT(depth == kInvalidProtoDepth);
__ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
__ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
}
@@ -1062,19 +1348,17 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
UNREACHABLE();
}
- // Get the function and setup the context.
- __ mov(edi, Immediate(Handle<JSFunction>(function)));
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- ASSERT(function->is_compiled());
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- __ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+ if (depth != kInvalidProtoDepth) {
+ GenerateFastApiCall(masm(), optimization, argc);
+ } else {
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ }
// Handle call cache miss.
__ bind(&miss);
+ if (depth != kInvalidProtoDepth) {
+ FreeSpaceForFastApiCall(masm(), eax);
+ }
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ jmp(ic, RelocInfo::CODE_TARGET);
@@ -1087,7 +1371,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
}
-Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
JSObject* holder,
String* name) {
// ----------- S t a t e -------------
@@ -1108,18 +1392,16 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
// Get the receiver from the stack.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- CallInterceptorCompiler compiler(arguments(), ecx);
- CompileLoadInterceptor(&compiler,
- this,
- masm(),
- JSObject::cast(object),
- holder,
- name,
- &lookup,
- edx,
- ebx,
- edi,
- &miss);
+ CallInterceptorCompiler compiler(this, arguments(), ecx);
+ compiler.Compile(masm(),
+ object,
+ holder,
+ name,
+ &lookup,
+ edx,
+ ebx,
+ edi,
+ &miss);
// Restore receiver.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -1249,7 +1531,6 @@ Object* StoreStubCompiler::CompileStoreField(JSObject* object,
// Generate store field code. Trashes the name register.
GenerateStoreField(masm(),
- Builtins::StoreIC_ExtendStorage,
object,
index,
transition,
@@ -1423,15 +1704,14 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
__ j(not_equal, &miss, not_taken);
// Get the object from the stack.
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
// Generate store field code. Trashes the name register.
GenerateStoreField(masm(),
- Builtins::KeyedStoreIC_ExtendStorage,
object,
index,
transition,
- ebx, ecx, edx,
+ edx, ecx, ebx,
&miss);
// Handle store cache miss.
@@ -1451,13 +1731,12 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
int index,
String* name) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
Label miss;
- __ mov(eax, Operand(esp, kPointerSize));
GenerateLoadField(object, holder, eax, ebx, edx, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1472,13 +1751,12 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
JSObject* holder,
AccessorInfo* callback) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
Label miss;
- __ mov(eax, Operand(esp, kPointerSize));
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
callback, name, &miss, &failure);
@@ -1497,13 +1775,12 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
Object* value,
String* name) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
Label miss;
- __ mov(eax, Operand(esp, kPointerSize));
GenerateLoadConstant(object, holder, eax, ebx, edx, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1517,16 +1794,15 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
JSObject* holder,
String* name) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
Label miss;
LookupResult lookup;
LookupPostInterceptor(holder, name, &lookup);
- __ mov(eax, Operand(esp, kPointerSize));
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
GenerateLoadInterceptor(receiver,
@@ -1553,15 +1829,12 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
String* name,
bool is_dont_delete) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
Label miss;
- // Get the receiver from the stack.
- __ mov(eax, Operand(esp, kPointerSize));
-
// If the object is the holder then we know that it's a global
// object which can only happen for contextual loads. In this case,
// the receiver cannot be a smi.
@@ -1574,19 +1847,20 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
CheckPrototypes(object, eax, holder, ebx, edx, name, &miss);
// Get the value from the cell.
- __ mov(eax, Immediate(Handle<JSGlobalPropertyCell>(cell)));
- __ mov(eax, FieldOperand(eax, JSGlobalPropertyCell::kValueOffset));
+ __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+ __ mov(ebx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
- __ cmp(eax, Factory::the_hole_value());
+ __ cmp(ebx, Factory::the_hole_value());
__ j(equal, &miss, not_taken);
} else if (FLAG_debug_code) {
- __ cmp(eax, Factory::the_hole_value());
+ __ cmp(ebx, Factory::the_hole_value());
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
__ IncrementCounter(&Counters::named_load_global_inline, 1);
+ __ mov(eax, ebx);
__ ret(0);
__ bind(&miss);
@@ -1603,21 +1877,19 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
JSObject* holder,
int index) {
// ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : name
- // -- esp[8] : receiver
// -----------------------------------
Label miss;
- __ mov(eax, Operand(esp, kPointerSize));
- __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_field, 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
- GenerateLoadField(receiver, holder, ecx, ebx, edx, index, name, &miss);
+ GenerateLoadField(receiver, holder, edx, ebx, ecx, index, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_field, 1);
@@ -1633,14 +1905,12 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
JSObject* holder,
AccessorInfo* callback) {
// ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : name
- // -- esp[8] : receiver
// -----------------------------------
Label miss;
- __ mov(eax, Operand(esp, kPointerSize));
- __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_callback, 1);
// Check that the name has not changed.
@@ -1648,7 +1918,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ j(not_equal, &miss, not_taken);
Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, ecx, eax, ebx, edx,
+ bool success = GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx,
callback, name, &miss, &failure);
if (!success) return failure;
@@ -1666,21 +1936,19 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
JSObject* holder,
Object* value) {
// ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : name
- // -- esp[8] : receiver
// -----------------------------------
Label miss;
- __ mov(eax, Operand(esp, kPointerSize));
- __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_constant_function, 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
- GenerateLoadConstant(receiver, holder, ecx, ebx, edx,
+ GenerateLoadConstant(receiver, holder, edx, ebx, ecx,
value, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_constant_function, 1);
@@ -1695,14 +1963,12 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
JSObject* holder,
String* name) {
// ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : name
- // -- esp[8] : receiver
// -----------------------------------
Label miss;
- __ mov(eax, Operand(esp, kPointerSize));
- __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_interceptor, 1);
// Check that the name has not changed.
@@ -1714,9 +1980,9 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
GenerateLoadInterceptor(receiver,
holder,
&lookup,
- ecx,
- eax,
edx,
+ eax,
+ ecx,
ebx,
name,
&miss);
@@ -1733,21 +1999,19 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
// ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : name
- // -- esp[8] : receiver
// -----------------------------------
Label miss;
- __ mov(eax, Operand(esp, kPointerSize));
- __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_array_length, 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
- GenerateLoadArrayLength(masm(), ecx, edx, &miss);
+ GenerateLoadArrayLength(masm(), edx, ecx, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_array_length, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1759,21 +2023,19 @@ Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
// ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : name
- // -- esp[8] : receiver
// -----------------------------------
Label miss;
- __ mov(eax, Operand(esp, kPointerSize));
- __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_string_length, 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
- GenerateLoadStringLength(masm(), ecx, edx, &miss);
+ GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1785,21 +2047,19 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
// ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : name
- // -- esp[8] : receiver
// -----------------------------------
Label miss;
- __ mov(eax, Operand(esp, kPointerSize));
- __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
- GenerateLoadFunctionPrototype(masm(), ecx, edx, ebx, &miss);
+ GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_function_prototype, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc
index 9267507c7..d2485392f 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.cc
+++ b/deps/v8/src/ia32/virtual-frame-ia32.cc
@@ -45,7 +45,7 @@ VirtualFrame::VirtualFrame()
: elements_(parameter_count() + local_count() + kPreallocatedElements),
stack_pointer_(parameter_count() + 1) { // 0-based index of TOS.
for (int i = 0; i <= stack_pointer_; i++) {
- elements_.Add(FrameElement::MemoryElement());
+ elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
}
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
register_locations_[i] = kIllegalIndex;
@@ -173,10 +173,12 @@ void VirtualFrame::MakeMergable() {
for (int i = 0; i < element_count(); i++) {
FrameElement element = elements_[i];
+ // All number type information is reset to unknown for a mergable frame
+ // because of incoming back edges.
if (element.is_constant() || element.is_copy()) {
if (element.is_synced()) {
// Just spill.
- elements_[i] = FrameElement::MemoryElement();
+ elements_[i] = FrameElement::MemoryElement(NumberInfo::kUnknown);
} else {
// Allocate to a register.
FrameElement backing_element; // Invalid if not a copy.
@@ -187,7 +189,8 @@ void VirtualFrame::MakeMergable() {
ASSERT(fresh.is_valid()); // A register was spilled if all were in use.
elements_[i] =
FrameElement::RegisterElement(fresh.reg(),
- FrameElement::NOT_SYNCED);
+ FrameElement::NOT_SYNCED,
+ NumberInfo::kUnknown);
Use(fresh.reg(), i);
// Emit a move.
@@ -220,6 +223,7 @@ void VirtualFrame::MakeMergable() {
// The copy flag is not relied on before the end of this loop,
// including when registers are spilled.
elements_[i].clear_copied();
+ elements_[i].set_number_info(NumberInfo::kUnknown);
}
}
}
@@ -607,10 +611,14 @@ int VirtualFrame::InvalidateFrameSlotAt(int index) {
// Set the new backing element.
if (elements_[new_backing_index].is_synced()) {
elements_[new_backing_index] =
- FrameElement::RegisterElement(backing_reg, FrameElement::SYNCED);
+ FrameElement::RegisterElement(backing_reg,
+ FrameElement::SYNCED,
+ original.number_info());
} else {
elements_[new_backing_index] =
- FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED);
+ FrameElement::RegisterElement(backing_reg,
+ FrameElement::NOT_SYNCED,
+ original.number_info());
}
// Update the other copies.
for (int i = new_backing_index + 1; i < element_count(); i++) {
@@ -641,7 +649,8 @@ void VirtualFrame::TakeFrameSlotAt(int index) {
ASSERT(fresh.is_valid());
FrameElement new_element =
FrameElement::RegisterElement(fresh.reg(),
- FrameElement::NOT_SYNCED);
+ FrameElement::NOT_SYNCED,
+ original.number_info());
Use(fresh.reg(), element_count());
elements_.Add(new_element);
__ mov(fresh.reg(), Operand(ebp, fp_relative(index)));
@@ -853,6 +862,17 @@ Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
}
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void VirtualFrame::DebugBreak() {
+ PrepareForCall(0, 0);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ DebugBreak();
+ Result result = cgen()->allocator()->Allocate(eax);
+ ASSERT(result.is_valid());
+}
+#endif
+
+
Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
int arg_count) {
@@ -877,22 +897,53 @@ Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
// Name and receiver are on the top of the frame. The IC expects
- // name in ecx and receiver on the stack. It does not drop the
- // receiver.
+ // name in ecx and receiver in eax.
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
Result name = Pop();
- PrepareForCall(1, 0); // One stack arg, not callee-dropped.
- name.ToRegister(ecx);
+ Result receiver = Pop();
+ PrepareForCall(0, 0); // No stack arguments.
+ // Move results to the right registers:
+ if (name.is_register() && name.reg().is(eax)) {
+ if (receiver.is_register() && receiver.reg().is(ecx)) {
+ // Wrong registers.
+ __ xchg(eax, ecx);
+ } else {
+ // Register ecx is free for name, which frees eax for receiver.
+ name.ToRegister(ecx);
+ receiver.ToRegister(eax);
+ }
+ } else {
+ // Register eax is free for receiver, which frees ecx for name.
+ receiver.ToRegister(eax);
+ name.ToRegister(ecx);
+ }
name.Unuse();
+ receiver.Unuse();
return RawCallCodeObject(ic, mode);
}
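
The register-fixing logic only needs the xchg when name and receiver start out exactly crossed; in every other case one of the target registers is free and plain moves suffice (the real code additionally orders the moves so the occupied register is vacated first). A stand-alone model of the decision:

#include <algorithm>

struct Cpu { int eax, ecx; };

void FixForLoadIC(Cpu* cpu, int name, int receiver,
                  bool name_in_eax, bool receiver_in_ecx) {
  if (name_in_eax && receiver_in_ecx) {
    std::swap(cpu->eax, cpu->ecx);  // __ xchg(eax, ecx)
  } else {
    cpu->ecx = name;                // name.ToRegister(ecx)
    cpu->eax = receiver;            // receiver.ToRegister(eax)
  }
}
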
Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
- // Key and receiver are on top of the frame. The IC expects them on
- // the stack. It does not drop them.
+ // Key and receiver are on top of the frame. Put them in eax and edx.
+ Result key = Pop();
+ Result receiver = Pop();
+ PrepareForCall(0, 0);
+
+ if (!key.is_register() || !key.reg().is(edx)) {
+ // Register edx is available for receiver.
+ receiver.ToRegister(edx);
+ key.ToRegister(eax);
+ } else if (!receiver.is_register() || !receiver.reg().is(eax)) {
+ // Register eax is available for key.
+ key.ToRegister(eax);
+ receiver.ToRegister(edx);
+ } else {
+ __ xchg(edx, eax);
+ }
+ key.Unuse();
+ receiver.Unuse();
+
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- PrepareForCall(2, 0); // Two stack args, neither callee-dropped.
return RawCallCodeObject(ic, mode);
}
@@ -947,7 +998,6 @@ Result VirtualFrame::CallKeyedStoreIC() {
// expects value in eax and key and receiver on the stack. It does
// not drop the key and receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- // TODO(1222589): Make the IC grab the values from the stack.
Result value = Pop();
PrepareForCall(2, 0); // Two stack args, neither callee-dropped.
value.ToRegister(eax);
@@ -1025,6 +1075,14 @@ Result VirtualFrame::Pop() {
int index = element_count();
ASSERT(element.is_valid());
+ // Get number type information of the result.
+ NumberInfo::Type info;
+ if (!element.is_copy()) {
+ info = element.number_info();
+ } else {
+ info = elements_[element.index()].number_info();
+ }
+
bool pop_needed = (stack_pointer_ == index);
if (pop_needed) {
stack_pointer_--;
@@ -1032,6 +1090,7 @@ Result VirtualFrame::Pop() {
Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
__ pop(temp.reg());
+ temp.set_number_info(info);
return temp;
}
@@ -1059,14 +1118,16 @@ Result VirtualFrame::Pop() {
ASSERT(temp.is_valid());
Use(temp.reg(), index);
FrameElement new_element =
- FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED);
+ FrameElement::RegisterElement(temp.reg(),
+ FrameElement::SYNCED,
+ element.number_info());
// Preserve the copy flag on the element.
if (element.is_copied()) new_element.set_copied();
elements_[index] = new_element;
__ mov(temp.reg(), Operand(ebp, fp_relative(index)));
- return Result(temp.reg());
+ return Result(temp.reg(), info);
} else if (element.is_register()) {
- return Result(element.reg());
+ return Result(element.reg(), info);
} else {
ASSERT(element.is_constant());
return Result(element.handle());
@@ -1090,25 +1151,25 @@ void VirtualFrame::EmitPop(Operand operand) {
}
-void VirtualFrame::EmitPush(Register reg) {
+void VirtualFrame::EmitPush(Register reg, NumberInfo::Type info) {
ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement());
+ elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
__ push(reg);
}
-void VirtualFrame::EmitPush(Operand operand) {
+void VirtualFrame::EmitPush(Operand operand, NumberInfo::Type info) {
ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement());
+ elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
__ push(operand);
}
-void VirtualFrame::EmitPush(Immediate immediate) {
+void VirtualFrame::EmitPush(Immediate immediate, NumberInfo::Type info) {
ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement());
+ elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
__ push(immediate);
}
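
Editor's note: the CallLoadIC and CallKeyedLoadIC rewrites above both solve the same small permutation problem: two popped values must land in two fixed registers without clobbering one another, and the one genuine cycle (each value sitting in the other's target) is resolved with a single xchg. A minimal stand-alone sketch of that case analysis, modeling registers as integer slots (illustrative only; the real code works through Result::ToRegister):

#include <algorithm>

// Move the value in slot a to slot ra and the value in slot b to slot rb
// without losing either one -- the same case analysis as CallLoadIC above.
static void PlaceTwo(int regs[], int a, int ra, int b, int rb) {
  if (a == rb) {                      // A occupies B's target register.
    if (b == ra) {
      std::swap(regs[ra], regs[rb]);  // Two-cycle: one xchg fixes both.
    } else {
      regs[ra] = regs[a];             // Move A first, freeing rb for B.
      regs[rb] = regs[b];
    }
  } else {                            // A is not in rb, so B may move
    regs[rb] = regs[b];               // first; then A follows.
    regs[ra] = regs[a];
  }
}
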
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.h b/deps/v8/src/ia32/virtual-frame-ia32.h
index d6d55d12c..b078ba089 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.h
+++ b/deps/v8/src/ia32/virtual-frame-ia32.h
@@ -28,6 +28,7 @@
#ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_
#define V8_IA32_VIRTUAL_FRAME_IA32_H_
+#include "number-info.h"
#include "register-allocator.h"
#include "scopes.h"
@@ -82,7 +83,8 @@ class VirtualFrame: public ZoneObject {
MacroAssembler* masm() { return cgen()->masm(); }
// Create a duplicate of an existing valid frame element.
- FrameElement CopyElementAt(int index);
+ FrameElement CopyElementAt(int index,
+ NumberInfo::Type info = NumberInfo::kUninitialized);
// The number of elements on the virtual frame.
int element_count() { return elements_.length(); }
@@ -324,12 +326,16 @@ class VirtualFrame: public ZoneObject {
Result CallRuntime(Runtime::Function* f, int arg_count);
Result CallRuntime(Runtime::FunctionId id, int arg_count);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ void DebugBreak();
+#endif
+
// Invoke builtin given the number of arguments it expects on (and
// removes from) the stack.
Result InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, int arg_count);
// Call load IC. Name and receiver are found on top of the frame.
- // Receiver is not dropped.
+ // Both are dropped.
Result CallLoadIC(RelocInfo::Mode mode);
// Call keyed load IC. Key and receiver are found on top of the
@@ -381,12 +387,15 @@ class VirtualFrame: public ZoneObject {
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
- void EmitPush(Register reg);
- void EmitPush(Operand operand);
- void EmitPush(Immediate immediate);
+ void EmitPush(Register reg,
+ NumberInfo::Type info = NumberInfo::kUnknown);
+ void EmitPush(Operand operand,
+ NumberInfo::Type info = NumberInfo::kUnknown);
+ void EmitPush(Immediate immediate,
+ NumberInfo::Type info = NumberInfo::kUnknown);
// Push an element on the virtual frame.
- void Push(Register reg);
+ void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
void Push(Handle<Object> value);
void Push(Smi* value) {
Push(Handle<Object> (value));
@@ -398,7 +407,7 @@ class VirtualFrame: public ZoneObject {
// This assert will trigger if you try to push the same value twice.
ASSERT(result->is_valid());
if (result->is_register()) {
- Push(result->reg());
+ Push(result->reg(), result->number_info());
} else {
ASSERT(result->is_constant());
Push(result->handle());
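
Editor's note: every new NumberInfo parameter in the header above gets a default (kUnknown for the push operations, kUninitialized for CopyElementAt), so the many existing call sites keep compiling unchanged while call sites that know better can pass stronger information. The pattern in miniature (names are illustrative stand-ins):

#include <cstdio>

enum Type { kUnknown, kSmi };  // stand-in for NumberInfo::Type

// The old signature was EmitPush(int); the added parameter defaults to
// the weakest type so every existing caller keeps compiling unchanged.
static void EmitPush(int value, Type info = kUnknown) {
  std::printf("push %d (info=%d)\n", value, info);
}

int main() {
  EmitPush(1);        // old-style call: info is implicitly kUnknown
  EmitPush(2, kSmi);  // informed call sites can be more precise
}
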
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 8fc9ddb86..31ece04a0 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -330,10 +330,11 @@ static void LookupForRead(Object* object,
while (true) {
object->Lookup(name, lookup);
// Besides normal conditions (property not found or it's not
- // an interceptor), bail out of lookup is not cacheable: we won't
+ // an interceptor), bail out if lookup is not cacheable: we won't
// be able to IC it anyway and regular lookup should work fine.
- if (lookup->IsNotFound() || lookup->type() != INTERCEPTOR ||
- !lookup->IsCacheable()) {
+ if (!lookup->IsFound()
+ || (lookup->type() != INTERCEPTOR)
+ || !lookup->IsCacheable()) {
return;
}
@@ -343,7 +344,7 @@ static void LookupForRead(Object* object,
}
holder->LocalLookupRealNamedProperty(name, lookup);
- if (lookup->IsValid()) {
+ if (lookup->IsProperty()) {
ASSERT(lookup->type() != INTERCEPTOR);
return;
}
@@ -422,7 +423,7 @@ Object* CallIC::LoadFunction(State state,
LookupResult lookup;
LookupForRead(*object, *name, &lookup);
- if (!lookup.IsValid()) {
+ if (!lookup.IsProperty()) {
// If the object does not have the requested property, check which
// exception we need to throw.
if (IsContextual(object)) {
@@ -455,7 +456,7 @@ Object* CallIC::LoadFunction(State state,
if (result->IsJSFunction()) {
// Check if there is an optimized (builtin) version of the function.
- // Ignored this will degrade performance for Array.prototype.{push,pop}.
+ // Ignoring this will degrade performance for some Array functions.
// Please note we only return the optimized function iff
// the JSObject has FastElements.
if (object->IsJSObject() && JSObject::cast(*object)->HasFastElements()) {
@@ -493,7 +494,7 @@ void CallIC::UpdateCaches(LookupResult* lookup,
Handle<String> name) {
ASSERT(lookup->IsLoaded());
// Bail out if we didn't find a result.
- if (!lookup->IsValid() || !lookup->IsCacheable()) return;
+ if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
// Compute the number of arguments.
int argc = target()->arguments_count();
@@ -642,8 +643,8 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
LookupResult lookup;
LookupForRead(*object, *name, &lookup);
- // If lookup is invalid, check if we need to throw an exception.
- if (!lookup.IsValid()) {
+ // If we did not find a property, check if we need to throw an exception.
+ if (!lookup.IsProperty()) {
if (FLAG_strict || IsContextual(object)) {
return ReferenceError("not_defined", name);
}
@@ -653,7 +654,7 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
bool can_be_inlined =
FLAG_use_ic &&
state == PREMONOMORPHIC &&
- lookup.IsValid() &&
+ lookup.IsProperty() &&
lookup.IsLoaded() &&
lookup.IsCacheable() &&
lookup.holder() == *object &&
@@ -681,7 +682,7 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
}
PropertyAttributes attr;
- if (lookup.IsValid() && lookup.type() == INTERCEPTOR) {
+ if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
// Get the property.
Object* result = object->GetProperty(*object, &lookup, *name, &attr);
if (result->IsFailure()) return result;
@@ -704,7 +705,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
Handle<String> name) {
ASSERT(lookup->IsLoaded());
// Bail out if we didn't find a result.
- if (!lookup->IsValid() || !lookup->IsCacheable()) return;
+ if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
// Loading properties from values is not common, so don't try to
// deal with non-JS objects here.
@@ -857,8 +858,8 @@ Object* KeyedLoadIC::Load(State state,
LookupResult lookup;
LookupForRead(*object, *name, &lookup);
- // If lookup is invalid, check if we need to throw an exception.
- if (!lookup.IsValid()) {
+ // If we did not find a property, check if we need to throw an exception.
+ if (!lookup.IsProperty()) {
if (FLAG_strict || IsContextual(object)) {
return ReferenceError("not_defined", name);
}
@@ -869,7 +870,7 @@ Object* KeyedLoadIC::Load(State state,
}
PropertyAttributes attr;
- if (lookup.IsValid() && lookup.type() == INTERCEPTOR) {
+ if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
// Get the property.
Object* result = object->GetProperty(*object, &lookup, *name, &attr);
if (result->IsFailure()) return result;
@@ -896,6 +897,8 @@ Object* KeyedLoadIC::Load(State state,
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->HasExternalArrayElements()) {
stub = external_array_stub(receiver->GetElementsKind());
+ } else if (receiver->HasIndexedInterceptor()) {
+ stub = indexed_interceptor_stub();
}
}
set_target(stub);
@@ -919,7 +922,7 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
Handle<Object> object, Handle<String> name) {
ASSERT(lookup->IsLoaded());
// Bail out if we didn't find a result.
- if (!lookup->IsValid() || !lookup->IsCacheable()) return;
+ if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
if (!object->IsJSObject()) return;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
@@ -992,7 +995,7 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
static bool StoreICableLookup(LookupResult* lookup) {
// Bail out if we didn't find a result.
- if (!lookup->IsValid() || !lookup->IsCacheable()) return false;
+ if (!lookup->IsPropertyOrTransition() || !lookup->IsCacheable()) return false;
// If the property is read-only, we leave the IC in its current
// state.
@@ -1046,6 +1049,20 @@ Object* StoreIC::Store(State state,
return *value;
}
+
+ // Use specialized code for setting the length of arrays.
+ if (receiver->IsJSArray()
+ && name->Equals(Heap::length_symbol())
+ && receiver->AllowsSetElementsLength()) {
+#ifdef DEBUG
+ if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
+#endif
+ Code* target = Builtins::builtin(Builtins::StoreIC_ArrayLength);
+ set_target(target);
+ StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
+ return receiver->SetProperty(*name, *value, NONE);
+ }
+
// Lookup the property locally in the receiver.
if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
LookupResult lookup;
@@ -1212,7 +1229,7 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
if (receiver->IsJSGlobalProxy()) return;
// Bail out if we didn't find a result.
- if (!lookup->IsValid() || !lookup->IsCacheable()) return;
+ if (!lookup->IsPropertyOrTransition() || !lookup->IsCacheable()) return;
// If the property is read-only, we leave the IC in its current
// state.
@@ -1320,16 +1337,6 @@ Object* LoadIC_Miss(Arguments args) {
}
-void LoadIC::GenerateInitialize(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
-}
-
-
-void LoadIC::GeneratePreMonomorphic(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
-}
-
-
// Used from ic_<arch>.cc
Object* KeyedLoadIC_Miss(Arguments args) {
NoHandleAllocation na;
@@ -1340,16 +1347,6 @@ Object* KeyedLoadIC_Miss(Arguments args) {
}
-void KeyedLoadIC::GenerateInitialize(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
-}
-
-
-void KeyedLoadIC::GeneratePreMonomorphic(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
-}
-
-
// Used from ic_<arch>.cc.
Object* StoreIC_Miss(Arguments args) {
NoHandleAllocation na;
@@ -1361,6 +1358,17 @@ Object* StoreIC_Miss(Arguments args) {
}
+Object* StoreIC_ArrayLength(Arguments args) {
+ NoHandleAllocation nha;
+
+ ASSERT(args.length() == 2);
+ JSObject* receiver = JSObject::cast(args[0]);
+ Object* len = args[1];
+
+ return receiver->SetElementsLength(len);
+}
+
+
// Extend storage is called in a store inline cache when
// it is necessary to extend the properties array of a
// JSObject.
@@ -1406,16 +1414,6 @@ Object* KeyedStoreIC_Miss(Arguments args) {
}
-void KeyedStoreIC::GenerateInitialize(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kKeyedStoreIC_Miss)));
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kKeyedStoreIC_Miss)));
-}
-
-
static Address IC_utilities[] = {
#define ADDR(name) FUNCTION_ADDR(name),
IC_UTIL_LIST(ADDR)
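
Editor's note: a recurring edit in this file replaces LookupResult::IsValid with IsProperty in the load and call ICs, while the store ICs switch to IsPropertyOrTransition. A sketch of the distinction as it reads from these call sites (assumed semantics; the authoritative definitions live in the LookupResult class):

struct LookupResult {  // illustrative stand-in for the real class
  bool found, transition, cacheable;
  bool IsProperty() const { return found && !transition; }
  bool IsPropertyOrTransition() const { return found; }
  bool IsCacheable() const { return cacheable; }
};

// Loads bail out unless a real property was found; stores additionally
// accept a map transition (a property that exists after the store).
static bool CacheableForLoad(const LookupResult& r) {
  return r.IsProperty() && r.IsCacheable();
}
static bool CacheableForStore(const LookupResult& r) {
  return r.IsPropertyOrTransition() && r.IsCacheable();
}
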
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index a991e30af..d545989bf 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -45,6 +45,7 @@ enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
ICU(KeyedLoadIC_Miss) \
ICU(CallIC_Miss) \
ICU(StoreIC_Miss) \
+ ICU(StoreIC_ArrayLength) \
ICU(SharedStoreIC_ExtendStorage) \
ICU(KeyedStoreIC_Miss) \
/* Utilities for IC stubs. */ \
@@ -53,6 +54,7 @@ enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
ICU(LoadPropertyWithInterceptorOnly) \
ICU(LoadPropertyWithInterceptorForLoad) \
ICU(LoadPropertyWithInterceptorForCall) \
+ ICU(KeyedLoadPropertyWithInterceptor) \
ICU(StoreInterceptorProperty)
//
@@ -223,8 +225,10 @@ class LoadIC: public IC {
Object* Load(State state, Handle<Object> object, Handle<String> name);
// Code generator routines.
- static void GenerateInitialize(MacroAssembler* masm);
- static void GeneratePreMonomorphic(MacroAssembler* masm);
+ static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+ static void GeneratePreMonomorphic(MacroAssembler* masm) {
+ GenerateMiss(masm);
+ }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
@@ -240,8 +244,6 @@ class LoadIC: public IC {
static const int kOffsetToLoadInstruction;
private:
- static void Generate(MacroAssembler* masm, const ExternalReference& f);
-
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
@@ -279,8 +281,11 @@ class KeyedLoadIC: public IC {
// Code generator routines.
static void GenerateMiss(MacroAssembler* masm);
- static void GenerateInitialize(MacroAssembler* masm);
- static void GeneratePreMonomorphic(MacroAssembler* masm);
+ static void GenerateRuntimeGetProperty(MacroAssembler* masm);
+ static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+ static void GeneratePreMonomorphic(MacroAssembler* masm) {
+ GenerateMiss(masm);
+ }
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateString(MacroAssembler* masm);
@@ -290,6 +295,7 @@ class KeyedLoadIC: public IC {
// for all other types.
static void GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type);
+ static void GenerateIndexedInterceptor(MacroAssembler* masm);
// Clear the use of the inlined version.
static void ClearInlinedVersion(Address address);
@@ -302,8 +308,6 @@ class KeyedLoadIC: public IC {
static const int kSlowCaseBitFieldMask =
(1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
- static void Generate(MacroAssembler* masm, const ExternalReference& f);
-
// Update the inline cache.
void UpdateCaches(LookupResult* lookup,
State state,
@@ -328,6 +332,10 @@ class KeyedLoadIC: public IC {
}
static Code* external_array_stub(JSObject::ElementsKind elements_kind);
+ static Code* indexed_interceptor_stub() {
+ return Builtins::builtin(Builtins::KeyedLoadIC_IndexedInterceptor);
+ }
+
static void Clear(Address address, Code* target);
// Support for patching the map that is checked in an inlined
@@ -351,7 +359,7 @@ class StoreIC: public IC {
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm);
- static void GenerateExtendStorage(MacroAssembler* masm);
+ static void GenerateArrayLength(MacroAssembler* masm);
private:
// Update the inline cache and the global stub cache based on the
@@ -384,10 +392,10 @@ class KeyedStoreIC: public IC {
Handle<Object> value);
// Code generators for stub routines. Only called once at startup.
- static void GenerateInitialize(MacroAssembler* masm);
+ static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GenerateMiss(MacroAssembler* masm);
+ static void GenerateRuntimeSetProperty(MacroAssembler* masm);
static void GenerateGeneric(MacroAssembler* masm);
- static void GenerateExtendStorage(MacroAssembler* masm);
// Generators for external array types. See objects.h.
// These are similar to the generic IC; they optimize the case of
@@ -403,8 +411,6 @@ class KeyedStoreIC: public IC {
static void RestoreInlinedVersion(Address address);
private:
- static void Generate(MacroAssembler* masm, const ExternalReference& f);
-
// Update the inline cache.
void UpdateCaches(LookupResult* lookup,
State state,
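
Editor's note: the ICU(...) entries added above feed a classic X-macro. IC_UTIL_LIST is written once and expanded several times: with ADDR in ic.cc (just above) to build the IC_utilities address table, and with other adapters to build the matching enum, so the table and its indices can never drift apart. The technique in isolation, with an illustrative list:

#define COLOR_LIST(V) V(Red) V(Green) V(Blue)  // the single master list

#define DECLARE_ENUM(name) k##name,
enum Color { COLOR_LIST(DECLARE_ENUM) kColorCount };
#undef DECLARE_ENUM

#define DECLARE_NAME(name) #name,
static const char* kColorNames[] = { COLOR_LIST(DECLARE_NAME) };
#undef DECLARE_NAME

// kColorNames[kGreen] == "Green"; both expansions stay in sync because
// they derive from the same list, exactly like IC_UTIL_LIST and ICU.
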
diff --git a/deps/v8/src/json-delay.js b/deps/v8/src/json-delay.js
index 7788f516b..3e42d3660 100644
--- a/deps/v8/src/json-delay.js
+++ b/deps/v8/src/json-delay.js
@@ -80,8 +80,9 @@ var characterQuoteCache = {
};
function QuoteSingleJSONCharacter(c) {
- if (c in characterQuoteCache)
+ if (c in characterQuoteCache) {
return characterQuoteCache[c];
+ }
var charCode = c.charCodeAt(0);
var result;
if (charCode < 16) result = '\\u000';
@@ -101,15 +102,17 @@ function QuoteJSONString(str) {
function StackContains(stack, val) {
var length = stack.length;
for (var i = 0; i < length; i++) {
- if (stack[i] === val)
+ if (stack[i] === val) {
return true;
+ }
}
return false;
}
function SerializeArray(value, replacer, stack, indent, gap) {
- if (StackContains(stack, value))
+ if (StackContains(stack, value)) {
throw MakeTypeError('circular_structure', []);
+ }
stack.push(value);
var stepback = indent;
indent += gap;
@@ -117,9 +120,10 @@ function SerializeArray(value, replacer, stack, indent, gap) {
var len = value.length;
for (var i = 0; i < len; i++) {
var strP = JSONSerialize($String(i), value, replacer, stack,
- indent, gap);
- if (IS_UNDEFINED(strP))
+ indent, gap);
+ if (IS_UNDEFINED(strP)) {
strP = "null";
+ }
partial.push(strP);
}
var final;
@@ -137,8 +141,9 @@ function SerializeArray(value, replacer, stack, indent, gap) {
}
function SerializeObject(value, replacer, stack, indent, gap) {
- if (StackContains(stack, value))
+ if (StackContains(stack, value)) {
throw MakeTypeError('circular_structure', []);
+ }
stack.push(value);
var stepback = indent;
indent += gap;
@@ -188,17 +193,21 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
var value = holder[key];
if (IS_OBJECT(value) && value) {
var toJSON = value.toJSON;
- if (IS_FUNCTION(toJSON))
+ if (IS_FUNCTION(toJSON)) {
value = toJSON.call(value, key);
+ }
}
- if (IS_FUNCTION(replacer))
+ if (IS_FUNCTION(replacer)) {
value = replacer.call(holder, key, value);
+ }
// Unwrap value if necessary
if (IS_OBJECT(value)) {
if (IS_NUMBER_WRAPPER(value)) {
value = $Number(value);
} else if (IS_STRING_WRAPPER(value)) {
value = $String(value);
+ } else if (IS_BOOLEAN_WRAPPER(value)) {
+ value = $Boolean(value);
}
}
switch (typeof value) {
@@ -232,12 +241,17 @@ function JSONStringify(value, replacer, space) {
}
var gap;
if (IS_NUMBER(space)) {
- space = $Math.min(space, 100);
+ space = $Math.min(space, 10);
gap = "";
- for (var i = 0; i < space; i++)
+ for (var i = 0; i < space; i++) {
gap += " ";
+ }
} else if (IS_STRING(space)) {
- gap = space;
+ if (space.length > 10) {
+ gap = space.substring(0, 10);
+ } else {
+ gap = space;
+ }
} else {
gap = "";
}
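
Editor's note: the json-delay.js hunk above tightens JSONStringify to the ES5 rules for the space argument: a numeric space is now clamped to at most 10 (it was 100), a string gap is truncated to its first 10 characters, and Boolean wrapper objects are unwrapped alongside Number and String. The gap computation, sketched as stand-alone helpers:

#include <algorithm>
#include <string>

// ES5-style gap from a numeric space argument: clamp to [0, 10] spaces.
static std::string GapFromNumber(int space) {
  return std::string(std::max(0, std::min(space, 10)), ' ');
}

// ES5-style gap from a string space argument: keep at most 10 characters.
static std::string GapFromString(const std::string& space) {
  return space.substr(0, 10);  // substr clamps to the string's length
}
// GapFromNumber(100).size() == 10; GapFromString("------------") has size 10.
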
diff --git a/deps/v8/src/jump-target-inl.h b/deps/v8/src/jump-target-inl.h
index 1f0676df0..dcd615eef 100644
--- a/deps/v8/src/jump-target-inl.h
+++ b/deps/v8/src/jump-target-inl.h
@@ -42,6 +42,9 @@ void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
} else if (target->is_copy()) {
entry_frame_->elements_[target->index()].set_copied();
}
+ if (direction_ == BIDIRECTIONAL) {
+ entry_frame_->elements_[index].set_number_info(NumberInfo::kUnknown);
+ }
}
} } // namespace v8::internal
diff --git a/deps/v8/src/jump-target.cc b/deps/v8/src/jump-target.cc
index 3782f92a9..66764e6a7 100644
--- a/deps/v8/src/jump-target.cc
+++ b/deps/v8/src/jump-target.cc
@@ -101,6 +101,17 @@ void JumpTarget::ComputeEntryFrame() {
if (element == NULL || !element->is_valid()) break;
element = element->Combine(&reaching_frames_[j]->elements_[i]);
+
+ FrameElement* other = &reaching_frames_[j]->elements_[i];
+ if (element != NULL && !element->is_copy()) {
+ ASSERT(other != NULL);
+ ASSERT(!other->is_copy());
+ // We overwrite the number information of one of the incoming frames.
+ // This is safe because we only use the frame for emitting merge code.
+ // The number information of incoming frames is not used anymore.
+ element->set_number_info(NumberInfo::Combine(element->number_info(),
+ other->number_info()));
+ }
}
elements[i] = element;
}
@@ -117,6 +128,7 @@ void JumpTarget::ComputeEntryFrame() {
// elements as copied exactly when they have a copy. Undetermined
// elements are initially recorded as if in memory.
if (target != NULL) {
+ ASSERT(!target->is_copy()); // These initial elements are never copies.
entry_frame_->elements_[index] = *target;
InitializeEntryElement(index, target);
}
@@ -125,7 +137,8 @@ void JumpTarget::ComputeEntryFrame() {
for (; index < length; index++) {
FrameElement* target = elements[index];
if (target == NULL) {
- entry_frame_->elements_.Add(FrameElement::MemoryElement());
+ entry_frame_->elements_.Add(
+ FrameElement::MemoryElement(NumberInfo::kUninitialized));
} else {
entry_frame_->elements_.Add(*target);
InitializeEntryElement(index, target);
@@ -142,9 +155,20 @@ void JumpTarget::ComputeEntryFrame() {
RegisterFile candidate_registers;
int best_count = kMinInt;
int best_reg_num = RegisterAllocator::kInvalidRegister;
+ NumberInfo::Type info = NumberInfo::kUninitialized;
for (int j = 0; j < reaching_frames_.length(); j++) {
FrameElement element = reaching_frames_[j]->elements_[i];
+ if (direction_ == BIDIRECTIONAL) {
+ info = NumberInfo::kUnknown;
+ } else if (!element.is_copy()) {
+ info = NumberInfo::Combine(info, element.number_info());
+ } else {
+ // New elements will not be copies, so get number information from
+ // backing element in the reaching frame.
+ info = NumberInfo::Combine(info,
+ reaching_frames_[j]->elements_[element.index()].number_info());
+ }
is_synced = is_synced && element.is_synced();
if (element.is_register() && !entry_frame_->is_used(element.reg())) {
// Count the register occurrence and remember it if better
@@ -158,11 +182,17 @@ void JumpTarget::ComputeEntryFrame() {
}
}
+ // We must have number type information now (not for copied elements).
+ ASSERT(entry_frame_->elements_[i].is_copy()
+ || info != NumberInfo::kUninitialized);
+
// If the value is synced on all frames, put it in memory. This
// costs nothing at the merge code but will incur a
// memory-to-register move when the value is needed later.
if (is_synced) {
// Already recorded as a memory element.
+ // Set combined number info.
+ entry_frame_->elements_[i].set_number_info(info);
continue;
}
@@ -183,13 +213,27 @@ void JumpTarget::ComputeEntryFrame() {
bool is_copied = entry_frame_->elements_[i].is_copied();
Register reg = RegisterAllocator::ToRegister(best_reg_num);
entry_frame_->elements_[i] =
- FrameElement::RegisterElement(reg,
- FrameElement::NOT_SYNCED);
+ FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED,
+ NumberInfo::kUninitialized);
if (is_copied) entry_frame_->elements_[i].set_copied();
entry_frame_->set_register_location(reg, i);
}
+ // Set combined number info.
+ entry_frame_->elements_[i].set_number_info(info);
+ }
+ }
+
+ // If we have incoming backward edges, assert that we forget all number information.
+#ifdef DEBUG
+ if (direction_ == BIDIRECTIONAL) {
+ for (int i = 0; i < length; ++i) {
+ if (!entry_frame_->elements_[i].is_copy()) {
+ ASSERT(entry_frame_->elements_[i].number_info() ==
+ NumberInfo::kUnknown);
+ }
}
}
+#endif
// The stack pointer is at the highest synced element or the base of
// the expression stack.
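
Editor's note: the merge logic above folds per-element number information across every reaching frame, and deliberately resets it to kUnknown at BIDIRECTIONAL (loop) targets, where a value can be retyped on the back edge. A minimal sketch of the join this assumes: kUninitialized as the identity, kUnknown as the top element, and disagreeing concrete types collapsing to the top (kSmi and kHeapNumber are assumed members; the real set lives in number-info.h):

enum Type { kUninitialized, kSmi, kHeapNumber, kUnknown };

static Type Combine(Type a, Type b) {
  if (a == kUninitialized) return b;  // identity element
  if (b == kUninitialized) return a;
  return (a == b) ? a : kUnknown;     // disagreement goes to the top
}
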
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
new file mode 100644
index 000000000..c50e007f9
--- /dev/null
+++ b/deps/v8/src/liveedit.cc
@@ -0,0 +1,87 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+
+#include "liveedit.h"
+#include "compiler.h"
+#include "oprofile-agent.h"
+#include "scopes.h"
+#include "global-handles.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+
+class FunctionInfoListener {
+ public:
+ void FunctionStarted(FunctionLiteral* fun) {
+ // Implementation follows.
+ }
+
+ void FunctionDone() {
+ // Implementation follows.
+ }
+
+ void FunctionScope(Scope* scope) {
+ // Implementation follows.
+ }
+
+ void FunctionCode(Handle<Code> function_code) {
+ // Implementation follows.
+ }
+};
+
+static FunctionInfoListener* active_function_info_listener = NULL;
+
+LiveEditFunctionTracker::LiveEditFunctionTracker(FunctionLiteral* fun) {
+ if (active_function_info_listener != NULL) {
+ active_function_info_listener->FunctionStarted(fun);
+ }
+}
+LiveEditFunctionTracker::~LiveEditFunctionTracker() {
+ if (active_function_info_listener != NULL) {
+ active_function_info_listener->FunctionDone();
+ }
+}
+void LiveEditFunctionTracker::RecordFunctionCode(Handle<Code> code) {
+ if (active_function_info_listener != NULL) {
+ active_function_info_listener->FunctionCode(code);
+ }
+}
+void LiveEditFunctionTracker::RecordFunctionScope(Scope* scope) {
+ if (active_function_info_listener != NULL) {
+ active_function_info_listener->FunctionScope(scope);
+ }
+}
+bool LiveEditFunctionTracker::IsActive() {
+ return active_function_info_listener != NULL;
+}
+
+} } // namespace v8::internal
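
Editor's note: LiveEditFunctionTracker above is a RAII hook: constructing one brackets a compile (FunctionStarted and FunctionDone fire from the constructor and destructor), and a single NULL check keeps the common, non-liveedit path nearly free. The shape of the pattern in isolation:

#include <cstddef>

struct Listener {  // illustrative stand-in for FunctionInfoListener
  virtual void Started() = 0;
  virtual void Done() = 0;
  virtual ~Listener() {}
};

static Listener* active_listener = NULL;  // non-NULL only during a live edit

class ScopeTracker {
 public:
  ScopeTracker() { if (active_listener != NULL) active_listener->Started(); }
  ~ScopeTracker() { if (active_listener != NULL) active_listener->Done(); }
};
// A stack-allocated ScopeTracker brackets the enclosed work; when no
// listener is active, each end costs one load and one compare.
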
diff --git a/deps/v8/src/liveedit.h b/deps/v8/src/liveedit.h
new file mode 100644
index 000000000..73aa7d3d3
--- /dev/null
+++ b/deps/v8/src/liveedit.h
@@ -0,0 +1,78 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LIVEEDIT_H_
+#define V8_LIVEEDIT_H_
+
+
+
+// Live Edit feature implementation.
+// The user should be able to change a script on an already running VM.
+// This feature matches the hot-swap facilities found in other frameworks.
+//
+// The basic use case is that the user spots a mistake in a function body
+// from the debugger and wishes to change the algorithm without a restart.
+//
+// A single change always takes the form of a simple replacement
+// (in pseudo-code):
+// script.source[positions, positions+length] = new_string;
+// The implementation first determines which function's body includes the
+// changed area. Then both the old and new versions of the script are fully
+// compiled in order to analyze whether the function changed its outer
+// scope expectations (or its number of parameters). If it did not, the
+// function's code is patched with the newly compiled code. If it did, the
+// enclosing function gets patched instead. All inner functions are left
+// untouched, whatever happened to them in the new script version; the new
+// version of the code will, however, instantiate newly compiled functions.
+
+
+#include "compiler.h"
+
+namespace v8 {
+namespace internal {
+
+// This class collects some specific information on the structure of
+// functions in a particular script. It gets called from the compiler all
+// the time, but actually records any data only while a liveedit operation
+// is in progress; at any other time it is very cheap.
+//
+// The primary interest of the Tracker is to record function scope
+// structures in order to analyze whether function code may be safely
+// patched (with the new code successfully reading existing data from
+// function scopes). The Tracker also collects compiled function code.
+class LiveEditFunctionTracker {
+ public:
+ explicit LiveEditFunctionTracker(FunctionLiteral* fun);
+ ~LiveEditFunctionTracker();
+ void RecordFunctionCode(Handle<Code> code);
+ void RecordFunctionScope(Scope* scope);
+
+ static bool IsActive();
+};
+
+} } // namespace v8::internal
+
+#endif /* V8_LIVEEDIT_H_ */
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index fd9560418..722e0fc04 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -351,15 +351,6 @@ void LogMessageBuilder::WriteToLogFile() {
}
-void LogMessageBuilder::WriteCStringToLogFile(const char* str) {
- const int len = StrLength(str);
- const int written = Log::Write(str, len);
- if (written != len && write_failure_handler != NULL) {
- write_failure_handler();
- }
-}
-
-
// Formatting string for back references to the whole line. E.g. "#2" means
// "the second line above".
const char* LogRecordCompressor::kLineBackwardReferenceFormat = "#%d";
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index 3e25b0e75..b769e9046 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -268,9 +268,6 @@ class LogMessageBuilder BASE_EMBEDDED {
// Write the log message to the log file currently opened.
void WriteToLogFile();
- // Write a null-terminated string to to the log file currently opened.
- void WriteCStringToLogFile(const char* str);
-
// A handler that is called when Log::Write fails.
typedef void (*WriteFailureHandler)();
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 5de7429e5..a3fef7310 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -330,6 +330,8 @@ SlidingStateWindow* Logger::sliding_state_window_ = NULL;
const char** Logger::log_events_ = NULL;
CompressionHelper* Logger::compression_helper_ = NULL;
bool Logger::is_logging_ = false;
+int Logger::cpu_profiler_nesting_ = 0;
+int Logger::heap_profiler_nesting_ = 0;
#define DECLARE_LONG_EVENT(ignore1, long_name, ignore2) long_name,
const char* kLongLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
@@ -368,15 +370,6 @@ void Logger::LogAliases() {
#endif // ENABLE_LOGGING_AND_PROFILING
-void Logger::Preamble(const char* content) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
- msg.WriteCStringToLogFile(content);
-#endif
-}
-
-
void Logger::StringEvent(const char* name, const char* value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log) UncheckedStringEvent(name, value);
@@ -1164,53 +1157,61 @@ int Logger::GetActiveProfilerModules() {
}
-void Logger::PauseProfiler(int flags) {
+void Logger::PauseProfiler(int flags, int tag) {
if (!Log::IsEnabled()) return;
- const int active_modules = GetActiveProfilerModules();
- const int modules_to_disable = active_modules & flags;
- if (modules_to_disable == PROFILER_MODULE_NONE) return;
-
- if (modules_to_disable & PROFILER_MODULE_CPU) {
- profiler_->pause();
- if (FLAG_prof_lazy) {
- if (!FLAG_sliding_state_window) ticker_->Stop();
- FLAG_log_code = false;
- // Must be the same message as Log::kDynamicBufferSeal.
- LOG(UncheckedStringEvent("profiler", "pause"));
+ if (flags & PROFILER_MODULE_CPU) {
+ // It is OK to have negative nesting.
+ if (--cpu_profiler_nesting_ == 0) {
+ profiler_->pause();
+ if (FLAG_prof_lazy) {
+ if (!FLAG_sliding_state_window) ticker_->Stop();
+ FLAG_log_code = false;
+ // Must be the same message as Log::kDynamicBufferSeal.
+ LOG(UncheckedStringEvent("profiler", "pause"));
+ }
}
}
- if (modules_to_disable &
+ if (flags &
(PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
- FLAG_log_gc = false;
+ if (--heap_profiler_nesting_ == 0) {
+ FLAG_log_gc = false;
+ }
+ }
+ if (tag != 0) {
+ IntEvent("close-tag", tag);
}
- // Turn off logging if no active modules remain.
- if ((active_modules & ~flags) == PROFILER_MODULE_NONE) {
+ if (GetActiveProfilerModules() == PROFILER_MODULE_NONE) {
is_logging_ = false;
}
}
-void Logger::ResumeProfiler(int flags) {
+void Logger::ResumeProfiler(int flags, int tag) {
if (!Log::IsEnabled()) return;
- const int modules_to_enable = ~GetActiveProfilerModules() & flags;
- if (modules_to_enable != PROFILER_MODULE_NONE) {
- is_logging_ = true;
+ if (tag != 0) {
+ IntEvent("open-tag", tag);
}
- if (modules_to_enable & PROFILER_MODULE_CPU) {
- if (FLAG_prof_lazy) {
- profiler_->Engage();
- LOG(UncheckedStringEvent("profiler", "resume"));
- FLAG_log_code = true;
- LogCompiledFunctions();
- LogFunctionObjects();
- LogAccessorCallbacks();
- if (!FLAG_sliding_state_window) ticker_->Start();
+ if (flags & PROFILER_MODULE_CPU) {
+ if (cpu_profiler_nesting_++ == 0) {
+ is_logging_ = true;
+ if (FLAG_prof_lazy) {
+ profiler_->Engage();
+ LOG(UncheckedStringEvent("profiler", "resume"));
+ FLAG_log_code = true;
+ LogCompiledFunctions();
+ LogFunctionObjects();
+ LogAccessorCallbacks();
+ if (!FLAG_sliding_state_window) ticker_->Start();
+ }
+ profiler_->resume();
}
- profiler_->resume();
}
- if (modules_to_enable &
+ if (flags &
(PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
- FLAG_log_gc = true;
+ if (heap_profiler_nesting_++ == 0) {
+ is_logging_ = true;
+ FLAG_log_gc = true;
+ }
}
}
@@ -1219,7 +1220,7 @@ void Logger::ResumeProfiler(int flags) {
// either from main or Profiler's thread.
void Logger::StopLoggingAndProfiling() {
Log::stop();
- PauseProfiler(PROFILER_MODULE_CPU);
+ PauseProfiler(PROFILER_MODULE_CPU, 0);
}
@@ -1261,7 +1262,9 @@ void Logger::LogCodeObject(Object* object) {
case Code::FUNCTION:
return; // We log this later using LogCompiledFunctions.
case Code::STUB:
- description = CodeStub::MajorName(code_object->major_key());
+ description = CodeStub::MajorName(code_object->major_key(), true);
+ if (description == NULL)
+ description = "A stub from the snapshot";
tag = Logger::STUB_TAG;
break;
case Code::BUILTIN:
@@ -1294,6 +1297,15 @@ void Logger::LogCodeObject(Object* object) {
}
+void Logger::LogCodeObjects() {
+ AssertNoAllocation no_alloc;
+ HeapIterator iterator;
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ if (obj->IsCode()) LogCodeObject(obj);
+ }
+}
+
+
void Logger::LogCompiledFunctions() {
HandleScope scope;
const int compiled_funcs_count = EnumerateCompiledFunctions(NULL);
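
Editor's note: the PauseProfiler/ResumeProfiler rewrite above replaces module-set arithmetic with per-module nesting counters. The expensive start/stop work now happens only on the 0 -> 1 and 1 -> 0 transitions, and, as the "It is OK to have negative nesting" comment notes, a pause arriving before any resume simply drives the counter below zero and is cancelled by the matching resume without ever triggering a start. The protocol in miniature:

static int nesting = 0;

static void Resume() {
  if (nesting++ == 0) { /* really start collecting */ }
}

static void Pause() {
  if (--nesting == 0) { /* really stop collecting */ }
}
// Pause(); Resume();                      // -1 then 0: no spurious start
// Resume(); Resume(); Pause(); Pause();   // starts once, stops once
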
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 1f6e60e1a..eb8369cf0 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -161,12 +161,6 @@ class Logger {
// Enable the computation of a sliding window of states.
static void EnableSlidingStateWindow();
- // Write a raw string to the log to be used as a preamble.
- // No check is made that the 'preamble' is actually at the beginning
- // of the log. The preample is used to write code events saved in the
- // snapshot.
- static void Preamble(const char* content);
-
// Emits an event with a string value -> (name, value).
static void StringEvent(const char* name, const char* value);
@@ -277,8 +271,8 @@ class Logger {
// Pause/Resume collection of profiling data.
// When data collection is paused, CPU Tick events are discarded until
// data collection is Resumed.
- static void PauseProfiler(int flags);
- static void ResumeProfiler(int flags);
+ static void PauseProfiler(int flags, int tag);
+ static void ResumeProfiler(int flags, int tag);
static int GetActiveProfilerModules();
// If logging is performed into a memory buffer, allows to
@@ -292,7 +286,7 @@ class Logger {
// Logs all accessor callbacks found in the heap.
static void LogAccessorCallbacks();
// Used for logging stubs found in the snapshot.
- static void LogCodeObject(Object* code_object);
+ static void LogCodeObjects();
private:
@@ -325,6 +319,9 @@ class Logger {
// Emits the source code of a regexp. Used by regexp events.
static void LogRegExpSource(Handle<JSRegExp> regexp);
+ // Used for logging stubs found in the snapshot.
+ static void LogCodeObject(Object* code_object);
+
// Emits a profiler tick event. Used by the profiler thread.
static void TickEvent(TickSample* sample, bool overflow);
@@ -376,6 +373,8 @@ class Logger {
friend class LoggerTestHelper;
static bool is_logging_;
+ static int cpu_profiler_nesting_;
+ static int heap_profiler_nesting_;
#else
static bool is_logging() { return false; }
#endif
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 0fe432823..81e5bf7a4 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -61,6 +61,8 @@ enum AllocationFlags {
RESULT_CONTAINS_TOP = 1 << 1
};
+// Invalid depth in prototype chain.
+const int kInvalidProtoDepth = -1;
#if V8_TARGET_ARCH_IA32
#include "assembler.h"
@@ -86,6 +88,13 @@ enum AllocationFlags {
#endif
#include "code.h" // must be after assembler_*.h
#include "arm/macro-assembler-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/constants-mips.h"
+#include "assembler.h"
+#include "mips/assembler-mips.h"
+#include "mips/assembler-mips-inl.h"
+#include "code.h" // must be after assembler_*.h
+#include "mips/macro-assembler-mips.h"
#else
#error Unsupported target architecture.
#endif
diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js
index d804648f5..5745e6179 100644
--- a/deps/v8/src/math.js
+++ b/deps/v8/src/math.js
@@ -233,7 +233,7 @@ function SetupMath() {
"SQRT2",
1.4142135623730951,
DONT_ENUM | DONT_DELETE | READ_ONLY);
- %TransformToFastProperties($Math);
+ %ToFastProperties($Math);
// Setup non-enumerable functions of the Math object and
// set their names.
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index df008c91b..ca82afe53 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -162,6 +162,8 @@ function FormatMessage(message) {
value_and_accessor: "Invalid property. A property cannot both have accessors and be writable or have a value: %0",
proto_object_or_null: "Object prototype may only be an Object or null",
property_desc_object: "Property description must be an object: %0",
+ redefine_disallowed: "Cannot redefine property: %0",
+ define_disallowed: "Cannot define property, object is not extensible: %0",
// RangeError
invalid_array_length: "Invalid array length",
stack_overflow: "Maximum call stack size exceeded",
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
new file mode 100644
index 000000000..2e634617c
--- /dev/null
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -0,0 +1,215 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
+
+
+#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
+#define V8_MIPS_ASSEMBLER_MIPS_INL_H_
+
+#include "mips/assembler-mips.h"
+#include "cpu.h"
+
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Condition
+
+Condition NegateCondition(Condition cc) {
+ ASSERT(cc != cc_always);
+ return static_cast<Condition>(cc ^ 1);
+}
+
+
+// -----------------------------------------------------------------------------
+// Operand and MemOperand
+
+Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
+ rm_ = no_reg;
+ imm32_ = immediate;
+ rmode_ = rmode;
+}
+
+Operand::Operand(const ExternalReference& f) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(f.address());
+ rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+Operand::Operand(const char* s) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(s);
+ rmode_ = RelocInfo::EMBEDDED_STRING;
+}
+
+Operand::Operand(Smi* value) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<intptr_t>(value);
+ rmode_ = RelocInfo::NONE;
+}
+
+Operand::Operand(Register rm) {
+ rm_ = rm;
+}
+
+bool Operand::is_reg() const {
+ return rm_.is_valid();
+}
+
+
+
+// -----------------------------------------------------------------------------
+// RelocInfo
+
+void RelocInfo::apply(intptr_t delta) {
+ // On MIPS we do not use pc relative addressing, so we don't need to patch the
+ // code here.
+}
+
+
+Address RelocInfo::target_address() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ return Assembler::target_address_at(pc_);
+}
+
+
+Address RelocInfo::target_address_address() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ return reinterpret_cast<Address>(pc_);
+}
+
+
+void RelocInfo::set_target_address(Address target) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ Assembler::set_target_address_at(pc_, target);
+}
+
+
+Object* RelocInfo::target_object() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Handle<Object>(reinterpret_cast<Object**>(
+ Assembler::target_address_at(pc_)));
+}
+
+
+Object** RelocInfo::target_object_address() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object**>(pc_);
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+}
+
+
+Address* RelocInfo::target_reference_address() {
+ ASSERT(rmode_ == EXTERNAL_REFERENCE);
+ return reinterpret_cast<Address*>(pc_);
+}
+
+
+Address RelocInfo::call_address() {
+ ASSERT(IsPatchedReturnSequence());
+ // The two-instruction offset assumes a patched return sequence.
+ ASSERT(IsJSReturn(rmode()));
+ return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ ASSERT(IsPatchedReturnSequence());
+ // The two-instruction offset assumes a patched return sequence.
+ ASSERT(IsJSReturn(rmode()));
+ Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+}
+
+
+Object* RelocInfo::call_object() {
+ return *call_object_address();
+}
+
+
+Object** RelocInfo::call_object_address() {
+ ASSERT(IsPatchedReturnSequence());
+ // The two-instruction offset assumes a patched return sequence.
+ ASSERT(IsJSReturn(rmode()));
+ return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+ *call_object_address() = target;
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+#ifdef DEBUG
+ PrintF("%s - %d - %s : Checking for jal(r)",
+ __FILE__, __LINE__, __func__);
+#endif
+ return ((Assembler::instr_at(pc_) & kOpcodeMask) == SPECIAL) &&
+ (((Assembler::instr_at(pc_) & kFunctionFieldMask) == JAL) ||
+ ((Assembler::instr_at(pc_) & kFunctionFieldMask) == JALR));
+}
+
+
+// -----------------------------------------------------------------------------
+// Assembler
+
+
+void Assembler::CheckBuffer() {
+ if (buffer_space() <= kGap) {
+ GrowBuffer();
+ }
+}
+
+
+void Assembler::emit(Instr x) {
+ CheckBuffer();
+ *reinterpret_cast<Instr*>(pc_) = x;
+ pc_ += kInstrSize;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS_ASSEMBLER_MIPS_INL_H_
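
Editor's note: NegateCondition above relies on the condition codes being laid out in complementary even/odd pairs, so toggling the least significant bit turns any condition into its opposite. The trick with illustrative enumerators:

enum Condition { eq = 0, ne = 1, lt = 2, ge = 3 };  // opposites share a pair

static Condition Negate(Condition cc) {
  return static_cast<Condition>(cc ^ 1);  // eq <-> ne, lt <-> ge
}
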
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
new file mode 100644
index 000000000..4a91624ed
--- /dev/null
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -0,0 +1,1208 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
+
+
+#include "v8.h"
+#include "mips/assembler-mips-inl.h"
+#include "serialize.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+
+const Register no_reg = { -1 };
+
+const Register zero_reg = { 0 };
+const Register at = { 1 };
+const Register v0 = { 2 };
+const Register v1 = { 3 };
+const Register a0 = { 4 };
+const Register a1 = { 5 };
+const Register a2 = { 6 };
+const Register a3 = { 7 };
+const Register t0 = { 8 };
+const Register t1 = { 9 };
+const Register t2 = { 10 };
+const Register t3 = { 11 };
+const Register t4 = { 12 };
+const Register t5 = { 13 };
+const Register t6 = { 14 };
+const Register t7 = { 15 };
+const Register s0 = { 16 };
+const Register s1 = { 17 };
+const Register s2 = { 18 };
+const Register s3 = { 19 };
+const Register s4 = { 20 };
+const Register s5 = { 21 };
+const Register s6 = { 22 };
+const Register s7 = { 23 };
+const Register t8 = { 24 };
+const Register t9 = { 25 };
+const Register k0 = { 26 };
+const Register k1 = { 27 };
+const Register gp = { 28 };
+const Register sp = { 29 };
+const Register s8_fp = { 30 };
+const Register ra = { 31 };
+
+
+const FPURegister no_creg = { -1 };
+
+const FPURegister f0 = { 0 };
+const FPURegister f1 = { 1 };
+const FPURegister f2 = { 2 };
+const FPURegister f3 = { 3 };
+const FPURegister f4 = { 4 };
+const FPURegister f5 = { 5 };
+const FPURegister f6 = { 6 };
+const FPURegister f7 = { 7 };
+const FPURegister f8 = { 8 };
+const FPURegister f9 = { 9 };
+const FPURegister f10 = { 10 };
+const FPURegister f11 = { 11 };
+const FPURegister f12 = { 12 };
+const FPURegister f13 = { 13 };
+const FPURegister f14 = { 14 };
+const FPURegister f15 = { 15 };
+const FPURegister f16 = { 16 };
+const FPURegister f17 = { 17 };
+const FPURegister f18 = { 18 };
+const FPURegister f19 = { 19 };
+const FPURegister f20 = { 20 };
+const FPURegister f21 = { 21 };
+const FPURegister f22 = { 22 };
+const FPURegister f23 = { 23 };
+const FPURegister f24 = { 24 };
+const FPURegister f25 = { 25 };
+const FPURegister f26 = { 26 };
+const FPURegister f27 = { 27 };
+const FPURegister f28 = { 28 };
+const FPURegister f29 = { 29 };
+const FPURegister f30 = { 30 };
+const FPURegister f31 = { 31 };
+
+int ToNumber(Register reg) {
+ ASSERT(reg.is_valid());
+ const int kNumbers[] = {
+ 0, // zero_reg
+ 1, // at
+ 2, // v0
+ 3, // v1
+ 4, // a0
+ 5, // a1
+ 6, // a2
+ 7, // a3
+ 8, // t0
+ 9, // t1
+ 10, // t2
+ 11, // t3
+ 12, // t4
+ 13, // t5
+ 14, // t6
+ 15, // t7
+ 16, // s0
+ 17, // s1
+ 18, // s2
+ 19, // s3
+ 20, // s4
+ 21, // s5
+ 22, // s6
+ 23, // s7
+ 24, // t8
+ 25, // t9
+ 26, // k0
+ 27, // k1
+ 28, // gp
+ 29, // sp
+ 30, // s8_fp
+ 31, // ra
+ };
+ return kNumbers[reg.code()];
+}
+
+Register ToRegister(int num) {
+ ASSERT(num >= 0 && num < kNumRegisters);
+ const Register kRegisters[] = {
+ zero_reg,
+ at,
+ v0, v1,
+ a0, a1, a2, a3,
+ t0, t1, t2, t3, t4, t5, t6, t7,
+ s0, s1, s2, s3, s4, s5, s6, s7,
+ t8, t9,
+ k0, k1,
+ gp,
+ sp,
+ s8_fp,
+ ra
+ };
+ return kRegisters[num];
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo.
+
+const int RelocInfo::kApplyMask = 0;
+
+// Patch the code at the current address with the supplied instructions.
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+ Instr* pc = reinterpret_cast<Instr*>(pc_);
+ Instr* instr = reinterpret_cast<Instr*>(instructions);
+ for (int i = 0; i < instruction_count; i++) {
+ *(pc + i) = *(instr + i);
+ }
+
+ // Indicate that code has changed.
+ CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+ // Patch the code at the current address with a call to the target.
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand.
+// See assembler-mips-inl.h for inlined constructors.
+
+Operand::Operand(Handle<Object> handle) {
+ rm_ = no_reg;
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ ASSERT(!Heap::InNewSpace(obj));
+ if (obj->IsHeapObject()) {
+ imm32_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ } else {
+ // No relocation needed.
+ imm32_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = RelocInfo::NONE;
+ }
+}
+
+MemOperand::MemOperand(Register rm, int16_t offset) : Operand(rm) {
+ offset_ = offset;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler.
+
+static const int kMinimalBufferSize = 4*KB;
+static byte* spare_buffer_ = NULL;
+
+Assembler::Assembler(void* buffer, int buffer_size) {
+ if (buffer == NULL) {
+ // Do our own buffer management.
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+
+ if (spare_buffer_ != NULL) {
+ buffer = spare_buffer_;
+ spare_buffer_ = NULL;
+ }
+ }
+ if (buffer == NULL) {
+ buffer_ = NewArray<byte>(buffer_size);
+ } else {
+ buffer_ = static_cast<byte*>(buffer);
+ }
+ buffer_size_ = buffer_size;
+ own_buffer_ = true;
+
+ } else {
+ // Use externally provided buffer instead.
+ ASSERT(buffer_size > 0);
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+ own_buffer_ = false;
+ }
+
+ // Setup buffer pointers.
+ ASSERT(buffer_ != NULL);
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+ current_statement_position_ = RelocInfo::kNoPosition;
+ current_position_ = RelocInfo::kNoPosition;
+ written_statement_position_ = current_statement_position_;
+ written_position_ = current_position_;
+}
+
+
+Assembler::~Assembler() {
+ if (own_buffer_) {
+ if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+ spare_buffer_ = buffer_;
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+ ASSERT(pc_ <= reloc_info_writer.pos()); // no overlap
+ // Setup code descriptor.
+ desc->buffer = buffer_;
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+
+// The link chain is terminated by a negative code position (must be aligned).
+const int kEndOfChain = -4;
+
+bool Assembler::is_branch(Instr instr) {
+ uint32_t opcode = ((instr & kOpcodeMask));
+ uint32_t rt_field = ((instr & kRtFieldMask));
+ uint32_t rs_field = ((instr & kRsFieldMask));
+ // Checks if the instruction is a branch.
+ return opcode == BEQ ||
+ opcode == BNE ||
+ opcode == BLEZ ||
+ opcode == BGTZ ||
+ opcode == BEQL ||
+ opcode == BNEL ||
+ opcode == BLEZL ||
+ opcode == BGTZL||
+ (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
+ rt_field == BLTZAL || rt_field == BGEZAL)) ||
+ (opcode == COP1 && rs_field == BC1); // Coprocessor branch.
+}
+
+
+int Assembler::target_at(int32_t pos) {
+ Instr instr = instr_at(pos);
+ if ((instr & ~kImm16Mask) == 0) {
+ // Emitted label constant, not part of a branch.
+ return instr - (Code::kHeaderSize - kHeapObjectTag);
+ }
+ // Check we have a branch instruction.
+ ASSERT(is_branch(instr));
+ // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
+ // the compiler uses arithmetic shifts for signed integers.
+ int32_t imm18 = ((instr &
+ static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+
+ return pos + kBranchPCOffset + imm18;
+}
+
+
+void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
+ Instr instr = instr_at(pos);
+ if ((instr & ~kImm16Mask) == 0) {
+ ASSERT(target_pos == kEndOfChain || target_pos >= 0);
+ // Emitted label constant, not part of a branch.
+ // Make label relative to Code* of generated Code object.
+ instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ return;
+ }
+
+ ASSERT(is_branch(instr));
+ int32_t imm18 = target_pos - (pos + kBranchPCOffset);
+ ASSERT((imm18 & 3) == 0);
+
+ instr &= ~kImm16Mask;
+ int32_t imm16 = imm18 >> 2;
+ ASSERT(is_int16(imm16));
+
+ instr_at_put(pos, instr | (imm16 & kImm16Mask));
+}
+
+
+void Assembler::print(Label* L) {
+ if (L->is_unused()) {
+ PrintF("unused label\n");
+ } else if (L->is_bound()) {
+ PrintF("bound label to %d\n", L->pos());
+ } else if (L->is_linked()) {
+ Label l = *L;
+ PrintF("unbound label");
+ while (l.is_linked()) {
+ PrintF("@ %d ", l.pos());
+ Instr instr = instr_at(l.pos());
+ if ((instr & ~kImm16Mask) == 0) {
+ PrintF("value\n");
+ } else {
+ PrintF("%d\n", instr);
+ }
+ next(&l);
+ }
+ } else {
+ PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+ }
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+ ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
+ while (L->is_linked()) {
+ int32_t fixup_pos = L->pos();
+ next(L); // call next before overwriting link with target at fixup_pos
+ target_at_put(fixup_pos, pos);
+ }
+ L->bind_to(pos);
+
+ // Keep track of the last bound label so we don't eliminate any instructions
+ // before a bound label.
+ if (pos > last_bound_pos_)
+ last_bound_pos_ = pos;
+}
+
+
+void Assembler::link_to(Label* L, Label* appendix) {
+ if (appendix->is_linked()) {
+ if (L->is_linked()) {
+ // Append appendix to L's list.
+ int fixup_pos;
+ int link = L->pos();
+ do {
+ fixup_pos = link;
+ link = target_at(fixup_pos);
+ } while (link > 0);
+ ASSERT(link == kEndOfChain);
+ target_at_put(fixup_pos, appendix->pos());
+ } else {
+ // L is empty, simply use appendix
+ *L = *appendix;
+ }
+ }
+ appendix->Unuse(); // appendix should not be used anymore
+}
+
+
+void Assembler::bind(Label* L) {
+ ASSERT(!L->is_bound()); // label can only be bound once
+ bind_to(L, pc_offset());
+}
+
+
+void Assembler::next(Label* L) {
+ ASSERT(L->is_linked());
+ int link = target_at(L->pos());
+ if (link > 0) {
+ L->link_to(link);
+ } else {
+ ASSERT(link == kEndOfChain);
+ L->Unuse();
+ }
+}
+
+
+// We have to use a temporary register for things that can be relocated even
+// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
+// space. There is no guarantee that the relocated location can be similarly
+// encoded.
+bool Assembler::MustUseAt(RelocInfo::Mode rmode) {
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+ return Serializer::enabled();
+ } else if (rmode == RelocInfo::NONE) {
+ return false;
+ }
+ return true;
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+ Register rs,
+ Register rt,
+ Register rd,
+ uint16_t sa,
+ SecondaryField func) {
+ ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
+ Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+ | (rd.code() << kRdShift) | (sa << kSaShift) | func;
+ emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+ SecondaryField fmt,
+ FPURegister ft,
+ FPURegister fs,
+ FPURegister fd,
+ SecondaryField func) {
+ ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
+ Instr instr = opcode | fmt | (ft.code() << 16) | (fs.code() << kFsShift)
+ | (fd.code() << 6) | func;
+ emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+ SecondaryField fmt,
+ Register rt,
+ FPURegister fs,
+ FPURegister fd,
+ SecondaryField func) {
+ ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
+ Instr instr = opcode | fmt | (rt.code() << kRtShift)
+ | (fs.code() << kFsShift) | (fd.code() << 6) | func;
+ emit(instr);
+}
+
+
+// Instructions with immediate value.
+// Registers are in the order of the instruction encoding, from left to right.
+void Assembler::GenInstrImmediate(Opcode opcode,
+ Register rs,
+ Register rt,
+ int32_t j) {
+ ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
+ Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+ | (j & kImm16Mask);
+ emit(instr);
+}
+
+
+void Assembler::GenInstrImmediate(Opcode opcode,
+ Register rs,
+ SecondaryField SF,
+ int32_t j) {
+ ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
+ Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
+ emit(instr);
+}
+
+
+void Assembler::GenInstrImmediate(Opcode opcode,
+ Register rs,
+ FPURegister ft,
+ int32_t j) {
+ ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
+ Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
+ | (j & kImm16Mask);
+ emit(instr);
+}
+
+
+// Registers are in the order of the instruction encoding, from left to right.
+void Assembler::GenInstrJump(Opcode opcode,
+ uint32_t address) {
+ ASSERT(is_uint26(address));
+ Instr instr = opcode | address;
+ emit(instr);
+}
+
+
+int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+ int32_t target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link
+ } else {
+ target_pos = kEndOfChain;
+ }
+ L->link_to(pc_offset());
+ }
+
+ int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+ return offset;
+}
+
+
+void Assembler::label_at_put(Label* L, int at_offset) {
+  int target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();  // L's link
+    } else {
+      target_pos = kEndOfChain;
+    }
+    L->link_to(at_offset);
+  }
+  // Also write the target when the label is already bound; otherwise the
+  // slot at at_offset would be left unpatched.
+  instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+}
+
+
+//------- Branch and jump instructions --------
+
+void Assembler::b(int16_t offset) {
+ beq(zero_reg, zero_reg, offset);
+}
+
+
+void Assembler::bal(int16_t offset) {
+ bgezal(zero_reg, offset);
+}
+
+
+void Assembler::beq(Register rs, Register rt, int16_t offset) {
+ GenInstrImmediate(BEQ, rs, rt, offset);
+}
+
+
+void Assembler::bgez(Register rs, int16_t offset) {
+ GenInstrImmediate(REGIMM, rs, BGEZ, offset);
+}
+
+
+void Assembler::bgezal(Register rs, int16_t offset) {
+ GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
+}
+
+
+void Assembler::bgtz(Register rs, int16_t offset) {
+ GenInstrImmediate(BGTZ, rs, zero_reg, offset);
+}
+
+
+void Assembler::blez(Register rs, int16_t offset) {
+ GenInstrImmediate(BLEZ, rs, zero_reg, offset);
+}
+
+
+void Assembler::bltz(Register rs, int16_t offset) {
+ GenInstrImmediate(REGIMM, rs, BLTZ, offset);
+}
+
+
+void Assembler::bltzal(Register rs, int16_t offset) {
+ GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
+}
+
+
+void Assembler::bne(Register rs, Register rt, int16_t offset) {
+ GenInstrImmediate(BNE, rs, rt, offset);
+}
+
+
+void Assembler::j(int32_t target) {
+ ASSERT(is_uint28(target) && ((target & 3) == 0));
+ GenInstrJump(J, target >> 2);
+}
+
+
+void Assembler::jr(Register rs) {
+ GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+}
+
+
+void Assembler::jal(int32_t target) {
+ ASSERT(is_uint28(target) && ((target & 3) == 0));
+ GenInstrJump(JAL, target >> 2);
+}
+
+
+void Assembler::jalr(Register rs, Register rd) {
+ GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
+}
+
+
+//-------Data-processing-instructions---------
+
+// Arithmetic.
+
+void Assembler::add(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADD);
+}
+
+
+void Assembler::addu(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
+}
+
+
+void Assembler::addi(Register rd, Register rs, int32_t j) {
+ GenInstrImmediate(ADDI, rs, rd, j);
+}
+
+
+void Assembler::addiu(Register rd, Register rs, int32_t j) {
+ GenInstrImmediate(ADDIU, rs, rd, j);
+}
+
+
+void Assembler::sub(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUB);
+}
+
+
+void Assembler::subu(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
+}
+
+
+void Assembler::mul(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
+}
+
+
+void Assembler::mult(Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
+}
+
+
+void Assembler::multu(Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
+}
+
+
+void Assembler::div(Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
+}
+
+
+void Assembler::divu(Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
+}
+
+
+// Logical.
+
+void Assembler::and_(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
+}
+
+
+void Assembler::andi(Register rt, Register rs, int32_t j) {
+ GenInstrImmediate(ANDI, rs, rt, j);
+}
+
+
+void Assembler::or_(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
+}
+
+
+void Assembler::ori(Register rt, Register rs, int32_t j) {
+ GenInstrImmediate(ORI, rs, rt, j);
+}
+
+
+void Assembler::xor_(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
+}
+
+
+void Assembler::xori(Register rt, Register rs, int32_t j) {
+ GenInstrImmediate(XORI, rs, rt, j);
+}
+
+
+void Assembler::nor(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
+}
+
+
+// Shifts.
+void Assembler::sll(Register rd, Register rt, uint16_t sa) {
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
+}
+
+
+void Assembler::sllv(Register rd, Register rt, Register rs) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
+}
+
+
+void Assembler::srl(Register rd, Register rt, uint16_t sa) {
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
+}
+
+
+void Assembler::srlv(Register rd, Register rt, Register rs) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
+}
+
+
+void Assembler::sra(Register rd, Register rt, uint16_t sa) {
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
+}
+
+
+void Assembler::srav(Register rd, Register rt, Register rs) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
+}
+
+
+//------------Memory-instructions-------------
+
+void Assembler::lb(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::lbu(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::lw(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::sb(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::sw(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::lui(Register rd, int32_t j) {
+ GenInstrImmediate(LUI, zero_reg, rd, j);
+}
+
+
+//-------------Misc-instructions--------------
+
+// Break / Trap instructions.
+void Assembler::break_(uint32_t code) {
+ ASSERT((code & ~0xfffff) == 0);
+ Instr break_instr = SPECIAL | BREAK | (code << 6);
+ emit(break_instr);
+}
+
+
+void Assembler::tge(Register rs, Register rt, uint16_t code) {
+ ASSERT(is_uint10(code));
+ Instr instr = SPECIAL | TGE | rs.code() << kRsShift
+ | rt.code() << kRtShift | code << 6;
+ emit(instr);
+}
+
+
+void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
+ ASSERT(is_uint10(code));
+ Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
+ | rt.code() << kRtShift | code << 6;
+ emit(instr);
+}
+
+
+void Assembler::tlt(Register rs, Register rt, uint16_t code) {
+ ASSERT(is_uint10(code));
+ Instr instr =
+ SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
+ emit(instr);
+}
+
+
+void Assembler::tltu(Register rs, Register rt, uint16_t code) {
+ ASSERT(is_uint10(code));
+ Instr instr = SPECIAL | TLTU | rs.code() << kRsShift
+ | rt.code() << kRtShift | code << 6;
+ emit(instr);
+}
+
+
+void Assembler::teq(Register rs, Register rt, uint16_t code) {
+ ASSERT(is_uint10(code));
+ Instr instr =
+ SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
+ emit(instr);
+}
+
+
+void Assembler::tne(Register rs, Register rt, uint16_t code) {
+ ASSERT(is_uint10(code));
+ Instr instr =
+ SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
+ emit(instr);
+}
+
+
+// Move from HI/LO register.
+
+void Assembler::mfhi(Register rd) {
+ GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
+}
+
+
+void Assembler::mflo(Register rd) {
+ GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
+}
+
+
+// Set on less than instructions.
+void Assembler::slt(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
+}
+
+
+void Assembler::sltu(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
+}
+
+
+void Assembler::slti(Register rt, Register rs, int32_t j) {
+ GenInstrImmediate(SLTI, rs, rt, j);
+}
+
+
+void Assembler::sltiu(Register rt, Register rs, int32_t j) {
+ GenInstrImmediate(SLTIU, rs, rt, j);
+}
+
+
+//--------Coprocessor-instructions----------------
+
+// Load, store, move.
+void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
+ GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::swc1(FPURegister fd, const MemOperand& src) {
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
+ GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::mtc1(FPURegister fs, Register rt) {
+ GenInstrRegister(COP1, MTC1, rt, fs, f0);
+}
+
+
+void Assembler::mthc1(FPURegister fs, Register rt) {
+ GenInstrRegister(COP1, MTHC1, rt, fs, f0);
+}
+
+
+void Assembler::mfc1(FPURegister fs, Register rt) {
+ GenInstrRegister(COP1, MFC1, rt, fs, f0);
+}
+
+
+void Assembler::mfhc1(FPURegister fs, Register rt) {
+ GenInstrRegister(COP1, MFHC1, rt, fs, f0);
+}
+
+
+// Conversions.
+
+void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
+}
+
+
+void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
+}
+
+
+void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
+}
+
+
+void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
+}
+
+
+void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
+}
+
+
+void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
+}
+
+
+void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
+}
+
+
+void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
+}
+
+
+void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
+}
+
+
+void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
+}
+
+
+// Conditions.
+void Assembler::c(FPUCondition cond, SecondaryField fmt,
+ FPURegister ft, FPURegister fs, uint16_t cc) {
+ ASSERT(is_uint3(cc));
+ ASSERT((fmt & ~(31 << kRsShift)) == 0);
+ Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
+ | cc << 8 | 3 << 4 | cond;
+ emit(instr);
+}
+
+
+void Assembler::bc1f(int16_t offset, uint16_t cc) {
+ ASSERT(is_uint3(cc));
+ Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
+ emit(instr);
+}
+
+
+void Assembler::bc1t(int16_t offset, uint16_t cc) {
+ ASSERT(is_uint3(cc));
+ Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
+ emit(instr);
+}
+
+
+// Debugging.
+void Assembler::RecordJSReturn() {
+ WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_debug_code) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+ if (pos == RelocInfo::kNoPosition) return;
+ ASSERT(pos >= 0);
+ current_position_ = pos;
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+ if (pos == RelocInfo::kNoPosition) return;
+ ASSERT(pos >= 0);
+ current_statement_position_ = pos;
+}
+
+
+void Assembler::WriteRecordedPositions() {
+ // Write the statement position if it is different from what was written last
+ // time.
+ if (current_statement_position_ != written_statement_position_) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
+ written_statement_position_ = current_statement_position_;
+ }
+
+ // Write the position if it is different from what was written last time and
+ // also different from the written statement position.
+ if (current_position_ != written_position_ &&
+ current_position_ != written_statement_position_) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::POSITION, current_position_);
+ written_position_ = current_position_;
+ }
+}
+
+
+void Assembler::GrowBuffer() {
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // Compute new buffer size.
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4*KB) {
+ desc.buffer_size = 4*KB;
+ } else if (buffer_size_ < 1*MB) {
+ desc.buffer_size = 2*buffer_size_;
+ } else {
+ desc.buffer_size = buffer_size_ + 1*MB;
+ }
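+  // Growth is geometric (doubling) up to 1 MB and linear (+1 MB per step)
+  // beyond that, which keeps the copy cost of very large buffers bounded.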
+ CHECK_GT(desc.buffer_size, 0); // no overflow
+
+ // Setup new buffer.
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+ // Copy the data.
+ int pc_delta = desc.buffer - buffer_;
+ int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+ memmove(desc.buffer, buffer_, desc.instr_size);
+ memmove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
+
+ // Switch buffers.
+ DeleteArray(buffer_);
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ += pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+
+  // On ia32 and ARM pc relative addressing is used, so code moved by
+  // pc_delta would need to be relocated. On MIPS the target address is
+  // loaded directly, so we do not need to relocate anything here.
+
+ ASSERT(!overflow());
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
+ if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
+ // Adjust code for new modes.
+ ASSERT(RelocInfo::IsJSReturn(rmode)
+ || RelocInfo::IsComment(rmode)
+ || RelocInfo::IsPosition(rmode));
+ // These modes do not need an entry in the constant pool.
+ }
+ if (rinfo.rmode() != RelocInfo::NONE) {
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+ !Serializer::enabled() &&
+ !FLAG_debug_code) {
+ return;
+ }
+ ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
+ reloc_info_writer.Write(&rinfo);
+ }
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+ Instr instr1 = instr_at(pc);
+ Instr instr2 = instr_at(pc + kInstrSize);
+ // Check we have 2 instructions generated by li.
+  ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
+         ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDIU ||
+                                   (instr2 & kOpcodeMask) == ORI ||
+                                   (instr2 & kOpcodeMask) == LUI)));
+ // Interpret these 2 instructions.
+ if (instr1 == nopInstr) {
+    if ((instr2 & kOpcodeMask) == ADDIU) {
+ return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16);
+ } else if ((instr2 & kOpcodeMask) == ORI) {
+ return reinterpret_cast<Address>(instr2 & kImm16Mask);
+ } else if ((instr2 & kOpcodeMask) == LUI) {
+ return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
+ }
+ } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
+ // 32 bits value.
+ return reinterpret_cast<Address>(
+ (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
+ }
+
+ // We should never get here.
+ UNREACHABLE();
+  return NULL;
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+ // On MIPS we need to patch the code to generate.
+
+ // First check we have a li.
+ Instr instr2 = instr_at(pc + kInstrSize);
+#ifdef DEBUG
+ Instr instr1 = instr_at(pc);
+
+ // Check we have indeed the result from a li with MustUseAt true.
+  CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
+        ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDIU ||
+                                  (instr2 & kOpcodeMask) == ORI ||
+                                  (instr2 & kOpcodeMask) == LUI)));
+#endif
+
+ uint32_t rt_code = (instr2 & kRtFieldMask);
+ uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+ uint32_t itarget = reinterpret_cast<uint32_t>(target);
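+
+  // E.g. patching to itarget == 0x12345678 rewrites the pair to
+  //   lui rt, 0x1234
+  //   ori rt, rt, 0x5678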
+
+  if (is_int16(itarget)) {
+    // nop
+    // addiu rt, zero_reg, (itarget & LOMask)
+    *p = nopInstr;
+    *(p+1) = ADDIU | rt_code | (itarget & LOMask);
+  } else if (!(itarget & HIMask)) {
+    // nop
+    // ori rt, zero_reg, (itarget & LOMask)
+    *p = nopInstr;
+    *(p+1) = ORI | rt_code | (itarget & LOMask);
+  } else if (!(itarget & LOMask)) {
+    // nop
+    // lui rt, (itarget & HIMask) >> 16
+    *p = nopInstr;
+    *(p+1) = LUI | rt_code | ((itarget & HIMask) >> 16);
+  } else {
+    // lui rt, (itarget & HIMask) >> 16
+    // ori rt, rt, (itarget & LOMask)
+    *p = LUI | rt_code | ((itarget & HIMask) >> 16);
+    *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & LOMask);
+  }
+
+ CPU::FlushICache(pc, 2 * sizeof(int32_t));
+}
+
+
+} } // namespace v8::internal
+
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
new file mode 100644
index 000000000..4f5ae3ebe
--- /dev/null
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -0,0 +1,663 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
+
+
+#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
+#define V8_MIPS_ASSEMBLER_MIPS_H_
+
+#include <stdio.h>
+#include "assembler.h"
+#include "constants-mips.h"
+#include "serialize.h"
+
+using namespace assembler::mips;
+
+namespace v8 {
+namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and FPURegister
+
+// Core register.
+struct Register {
+ bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+ bool is(Register reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ // Unfortunately we can't make this private in a struct.
+ int code_;
+};
+
+extern const Register no_reg;
+
+extern const Register zero_reg;
+extern const Register at;
+extern const Register v0;
+extern const Register v1;
+extern const Register a0;
+extern const Register a1;
+extern const Register a2;
+extern const Register a3;
+extern const Register t0;
+extern const Register t1;
+extern const Register t2;
+extern const Register t3;
+extern const Register t4;
+extern const Register t5;
+extern const Register t6;
+extern const Register t7;
+extern const Register s0;
+extern const Register s1;
+extern const Register s2;
+extern const Register s3;
+extern const Register s4;
+extern const Register s5;
+extern const Register s6;
+extern const Register s7;
+extern const Register t8;
+extern const Register t9;
+extern const Register k0;
+extern const Register k1;
+extern const Register gp;
+extern const Register sp;
+extern const Register s8_fp;
+extern const Register ra;
+
+int ToNumber(Register reg);
+
+Register ToRegister(int num);
+
+// Coprocessor register.
+struct FPURegister {
+  bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegister; }
+ bool is(FPURegister creg) const { return code_ == creg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ // Unfortunately we can't make this private in a struct.
+ int code_;
+};
+
+extern const FPURegister no_creg;
+
+extern const FPURegister f0;
+extern const FPURegister f1;
+extern const FPURegister f2;
+extern const FPURegister f3;
+extern const FPURegister f4;
+extern const FPURegister f5;
+extern const FPURegister f6;
+extern const FPURegister f7;
+extern const FPURegister f8;
+extern const FPURegister f9;
+extern const FPURegister f10;
+extern const FPURegister f11;
+extern const FPURegister f12; // arg
+extern const FPURegister f13;
+extern const FPURegister f14; // arg
+extern const FPURegister f15;
+extern const FPURegister f16;
+extern const FPURegister f17;
+extern const FPURegister f18;
+extern const FPURegister f19;
+extern const FPURegister f20;
+extern const FPURegister f21;
+extern const FPURegister f22;
+extern const FPURegister f23;
+extern const FPURegister f24;
+extern const FPURegister f25;
+extern const FPURegister f26;
+extern const FPURegister f27;
+extern const FPURegister f28;
+extern const FPURegister f29;
+extern const FPURegister f30;
+extern const FPURegister f31;
+
+
+// Returns the equivalent of !cc.
+// Negation of the default no_condition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
+inline Condition NegateCondition(Condition cc);
+
+inline Condition ReverseCondition(Condition cc) {
+ switch (cc) {
+ case Uless:
+ return Ugreater;
+ case Ugreater:
+ return Uless;
+ case Ugreater_equal:
+ return Uless_equal;
+ case Uless_equal:
+ return Ugreater_equal;
+ case less:
+ return greater;
+ case greater:
+ return less;
+ case greater_equal:
+ return less_equal;
+ case less_equal:
+ return greater_equal;
+ default:
+ return cc;
+  }
+}
+
+
+enum Hint {
+ no_hint = 0
+};
+
+inline Hint NegateHint(Hint hint) {
+ return no_hint;
+}
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands.
+
+// Class Operand represents a shifter operand in data processing instructions.
+class Operand BASE_EMBEDDED {
+ public:
+ // Immediate.
+ INLINE(explicit Operand(int32_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE));
+ INLINE(explicit Operand(const ExternalReference& f));
+ INLINE(explicit Operand(const char* s));
+ INLINE(explicit Operand(Object** opp));
+ INLINE(explicit Operand(Context** cpp));
+ explicit Operand(Handle<Object> handle);
+ INLINE(explicit Operand(Smi* value));
+
+ // Register.
+ INLINE(explicit Operand(Register rm));
+
+ // Return true if this is a register operand.
+ INLINE(bool is_reg() const);
+
+ Register rm() const { return rm_; }
+
+ private:
+ Register rm_;
+ int32_t imm32_; // Valid if rm_ == no_reg
+ RelocInfo::Mode rmode_;
+
+ friend class Assembler;
+ friend class MacroAssembler;
+};
+
+
+// On MIPS we have only one addressing mode with base_reg + offset.
+// Class MemOperand represents a memory operand in load and store instructions.
+class MemOperand : public Operand {
+ public:
+  explicit MemOperand(Register rm, int16_t offset = 0);
+
+ private:
+ int16_t offset_;
+
+ friend class Assembler;
+};
+
+
+class Assembler : public Malloced {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ Assembler(void* buffer, int buffer_size);
+ ~Assembler();
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+ // Assembler functions are invoked in between GetCode() calls.
+ void GetCode(CodeDesc* desc);
+
+ // Label operations & relative jumps (PPUM Appendix D).
+ //
+ // Takes a branch opcode (cc) and a label (L) and generates
+ // either a backward branch or a forward branch and links it
+ // to the label fixup chain. Usage:
+ //
+ // Label L; // unbound label
+ // j(cc, &L); // forward branch to unbound label
+ // bind(&L); // bind label to the current pc
+ // j(cc, &L); // backward branch to bound label
+ // bind(&L); // illegal: a label may be bound only once
+ //
+ // Note: The same Label can be used for forward and backward branches
+ // but it may be bound only once.
+ void bind(Label* L); // binds an unbound label L to the current code position
+
+  // Returns the branch offset to the given label from the current code
+  // position. Links the label to the current position if it is still unbound.
+  // Manages the jump elimination optimization if the second parameter is true.
+ int32_t branch_offset(Label* L, bool jump_elimination_allowed);
+ int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
+ int32_t o = branch_offset(L, jump_elimination_allowed);
+ ASSERT((o & 3) == 0); // Assert the offset is aligned.
+ return o >> 2;
+ }
+
+  // Puts a label's target address at the given position.
+ // The high 8 bits are set to zero.
+ void label_at_put(Label* L, int at_offset);
+
+ // Size of an instruction.
+ static const int kInstrSize = sizeof(Instr);
+
+ // Difference between address of current opcode and target address offset.
+ static const int kBranchPCOffset = 4;
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ static Address target_address_at(Address pc);
+ static void set_target_address_at(Address pc, Address target);
+
+ // This sets the branch destination (which gets loaded at the call address).
+ // This is for calls and branches within generated code.
+ inline static void set_target_at(Address instruction_payload,
+ Address target) {
+ set_target_address_at(instruction_payload, target);
+ }
+
+ // This sets the branch destination.
+ // This is for calls and branches to runtime code.
+ inline static void set_external_target_at(Address instruction_payload,
+ Address target) {
+ set_target_address_at(instruction_payload, target);
+ }
+
+ static const int kCallTargetSize = 3 * kPointerSize;
+ static const int kExternalTargetSize = 3 * kPointerSize;
+
+ // Distance between the instruction referring to the address of the call
+ // target and the return address.
+ static const int kCallTargetAddressOffset = 4 * kInstrSize;
+
+ // Distance between start of patched return sequence and the emitted address
+ // to jump to.
+ static const int kPatchReturnSequenceAddressOffset = kInstrSize;
+
+
+ // ---------------------------------------------------------------------------
+ // Code generation.
+
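+  // sll zero_reg, zero_reg, 0 assembles to 0x00000000, the canonical MIPS
+  // nop encoding.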
+ void nop() { sll(zero_reg, zero_reg, 0); }
+
+
+ //------- Branch and jump instructions --------
+ // We don't use likely variant of instructions.
+ void b(int16_t offset);
+  void b(Label* L) { b(branch_offset(L, false) >> 2); }
+  void bal(int16_t offset);
+  void bal(Label* L) { bal(branch_offset(L, false) >> 2); }
+
+ void beq(Register rs, Register rt, int16_t offset);
+ void beq(Register rs, Register rt, Label* L) {
+ beq(rs, rt, branch_offset(L, false) >> 2);
+ }
+ void bgez(Register rs, int16_t offset);
+ void bgezal(Register rs, int16_t offset);
+ void bgtz(Register rs, int16_t offset);
+ void blez(Register rs, int16_t offset);
+ void bltz(Register rs, int16_t offset);
+ void bltzal(Register rs, int16_t offset);
+ void bne(Register rs, Register rt, int16_t offset);
+ void bne(Register rs, Register rt, Label* L) {
+    bne(rs, rt, branch_offset(L, false) >> 2);
+ }
+
+  // Never use the int16_t b(l)cond version with a branch offset
+  // instead of the Label* version; see the Twiki for details.
+
+  // Jump targets must be in the current 256 MB-aligned region, i.e. 28 bits.
+ void j(int32_t target);
+ void jal(int32_t target);
+ void jalr(Register rs, Register rd = ra);
+ void jr(Register target);
+
+
+ //-------Data-processing-instructions---------
+
+ // Arithmetic.
+ void add(Register rd, Register rs, Register rt);
+ void addu(Register rd, Register rs, Register rt);
+ void sub(Register rd, Register rs, Register rt);
+ void subu(Register rd, Register rs, Register rt);
+ void mult(Register rs, Register rt);
+ void multu(Register rs, Register rt);
+ void div(Register rs, Register rt);
+ void divu(Register rs, Register rt);
+ void mul(Register rd, Register rs, Register rt);
+
+ void addi(Register rd, Register rs, int32_t j);
+ void addiu(Register rd, Register rs, int32_t j);
+
+ // Logical.
+ void and_(Register rd, Register rs, Register rt);
+ void or_(Register rd, Register rs, Register rt);
+ void xor_(Register rd, Register rs, Register rt);
+ void nor(Register rd, Register rs, Register rt);
+
+  void andi(Register rt, Register rs, int32_t j);
+  void ori(Register rt, Register rs, int32_t j);
+  void xori(Register rt, Register rs, int32_t j);
+ void lui(Register rd, int32_t j);
+
+ // Shifts.
+ void sll(Register rd, Register rt, uint16_t sa);
+ void sllv(Register rd, Register rt, Register rs);
+ void srl(Register rd, Register rt, uint16_t sa);
+ void srlv(Register rd, Register rt, Register rs);
+  void sra(Register rd, Register rt, uint16_t sa);
+  void srav(Register rd, Register rt, Register rs);
+
+
+ //------------Memory-instructions-------------
+
+ void lb(Register rd, const MemOperand& rs);
+ void lbu(Register rd, const MemOperand& rs);
+ void lw(Register rd, const MemOperand& rs);
+ void sb(Register rd, const MemOperand& rs);
+ void sw(Register rd, const MemOperand& rs);
+
+
+ //-------------Misc-instructions--------------
+
+ // Break / Trap instructions.
+ void break_(uint32_t code);
+ void tge(Register rs, Register rt, uint16_t code);
+ void tgeu(Register rs, Register rt, uint16_t code);
+ void tlt(Register rs, Register rt, uint16_t code);
+ void tltu(Register rs, Register rt, uint16_t code);
+ void teq(Register rs, Register rt, uint16_t code);
+ void tne(Register rs, Register rt, uint16_t code);
+
+ // Move from HI/LO register.
+ void mfhi(Register rd);
+ void mflo(Register rd);
+
+ // Set on less than.
+ void slt(Register rd, Register rs, Register rt);
+ void sltu(Register rd, Register rs, Register rt);
+  void slti(Register rt, Register rs, int32_t j);
+  void sltiu(Register rt, Register rs, int32_t j);
+
+
+ //--------Coprocessor-instructions----------------
+
+ // Load, store, and move.
+ void lwc1(FPURegister fd, const MemOperand& src);
+ void ldc1(FPURegister fd, const MemOperand& src);
+
+ void swc1(FPURegister fs, const MemOperand& dst);
+ void sdc1(FPURegister fs, const MemOperand& dst);
+
+ // When paired with MTC1 to write a value to a 64-bit FPR, the MTC1 must be
+ // executed first, followed by the MTHC1.
+ void mtc1(FPURegister fs, Register rt);
+ void mthc1(FPURegister fs, Register rt);
+ void mfc1(FPURegister fs, Register rt);
+ void mfhc1(FPURegister fs, Register rt);
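+
+  // For example (a sketch; the register names rt_lo/rt_hi are illustrative),
+  // moving a 64-bit value held in two core registers into f12:
+  //   mtc1(f12, rt_lo);   // Low 32 bits first.
+  //   mthc1(f12, rt_hi);  // Then the high 32 bits.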
+
+ // Conversion.
+ void cvt_w_s(FPURegister fd, FPURegister fs);
+ void cvt_w_d(FPURegister fd, FPURegister fs);
+
+ void cvt_l_s(FPURegister fd, FPURegister fs);
+ void cvt_l_d(FPURegister fd, FPURegister fs);
+
+ void cvt_s_w(FPURegister fd, FPURegister fs);
+ void cvt_s_l(FPURegister fd, FPURegister fs);
+ void cvt_s_d(FPURegister fd, FPURegister fs);
+
+ void cvt_d_w(FPURegister fd, FPURegister fs);
+ void cvt_d_l(FPURegister fd, FPURegister fs);
+ void cvt_d_s(FPURegister fd, FPURegister fs);
+
+ // Conditions and branches.
+ void c(FPUCondition cond, SecondaryField fmt,
+ FPURegister ft, FPURegister fs, uint16_t cc = 0);
+
+ void bc1f(int16_t offset, uint16_t cc = 0);
+  void bc1f(Label* L, uint16_t cc = 0) {
+    bc1f(branch_offset(L, false) >> 2, cc);
+  }
+  void bc1t(int16_t offset, uint16_t cc = 0);
+  void bc1t(Label* L, uint16_t cc = 0) {
+    bc1t(branch_offset(L, false) >> 2, cc);
+  }
+
+
+ // Check the code size generated from label to here.
+ int InstructionsGeneratedSince(Label* l) {
+ return (pc_offset() - l->pos()) / kInstrSize;
+ }
+
+ // Debugging.
+
+ // Mark address of the ExitJSFrame code.
+ void RecordJSReturn();
+
+ // Record a comment relocation entry that can be used by a disassembler.
+ // Use --debug_code to enable.
+ void RecordComment(const char* msg);
+
+ void RecordPosition(int pos);
+ void RecordStatementPosition(int pos);
+ void WriteRecordedPositions();
+
+ int32_t pc_offset() const { return pc_ - buffer_; }
+ int32_t current_position() const { return current_position_; }
+  int32_t current_statement_position() const {
+    return current_statement_position_;
+  }
+
+ // Check if there is less than kGap bytes available in the buffer.
+ // If this is the case, we need to grow the buffer before emitting
+ // an instruction or relocation information.
+ inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+
+ // Get the number of bytes available in the buffer.
+ inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+
+ protected:
+ int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+ // Read/patch instructions.
+ static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+ void instr_at_put(byte* pc, Instr instr) {
+ *reinterpret_cast<Instr*>(pc) = instr;
+ }
+ Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+ void instr_at_put(int pos, Instr instr) {
+ *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+ }
+
+ // Check if an instruction is a branch of some kind.
+ bool is_branch(Instr instr);
+
+ // Decode branch instruction at pos and return branch target pos.
+ int target_at(int32_t pos);
+
+ // Patch branch instruction at pos to branch to given branch target pos.
+ void target_at_put(int32_t pos, int32_t target_pos);
+
+  // Says whether we need to relocate with this mode.
+ bool MustUseAt(RelocInfo::Mode rmode);
+
+ // Record reloc info for current pc_.
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ private:
+ // Code buffer:
+ // The buffer into which code and relocation info are generated.
+ byte* buffer_;
+ int buffer_size_;
+ // True if the assembler owns the buffer, false if buffer is external.
+ bool own_buffer_;
+
+ // Buffer size and constant pool distance are checked together at regular
+ // intervals of kBufferCheckInterval emitted bytes.
+ static const int kBufferCheckInterval = 1*KB/2;
+
+ // Code generation.
+ // The relocation writer's position is at least kGap bytes below the end of
+ // the generated instructions. This is so that multi-instruction sequences do
+ // not have to check for overflow. The same is true for writes of large
+ // relocation info entries.
+ static const int kGap = 32;
+ byte* pc_; // The program counter - moves forward.
+
+ // Relocation information generation.
+ // Each relocation is encoded as a variable size value.
+ static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ RelocInfoWriter reloc_info_writer;
+
+  // The last bound label position; before this we cannot do instruction
+  // elimination.
+ int last_bound_pos_;
+
+ // Source position information.
+ int current_position_;
+ int current_statement_position_;
+ int written_position_;
+ int written_statement_position_;
+
+ // Code emission.
+ inline void CheckBuffer();
+ void GrowBuffer();
+ inline void emit(Instr x);
+
+ // Instruction generation.
+  // We have three different kinds of encoding layouts on MIPS. However, due
+  // to the many different types of objects encoded in the same fields, we
+  // end up with quite a few aliases for each mode. Using the same structure
+  // to refer to Register and FPURegister would spare a few aliases, but
+  // mixing both does not look clean. We could surely implement this
+  // differently.
+
+ void GenInstrRegister(Opcode opcode,
+ Register rs,
+ Register rt,
+ Register rd,
+ uint16_t sa = 0,
+ SecondaryField func = NULLSF);
+
+ void GenInstrRegister(Opcode opcode,
+ SecondaryField fmt,
+ FPURegister ft,
+ FPURegister fs,
+ FPURegister fd,
+ SecondaryField func = NULLSF);
+
+ void GenInstrRegister(Opcode opcode,
+ SecondaryField fmt,
+ Register rt,
+ FPURegister fs,
+ FPURegister fd,
+ SecondaryField func = NULLSF);
+
+
+ void GenInstrImmediate(Opcode opcode,
+ Register rs,
+ Register rt,
+ int32_t j);
+ void GenInstrImmediate(Opcode opcode,
+ Register rs,
+ SecondaryField SF,
+ int32_t j);
+ void GenInstrImmediate(Opcode opcode,
+ Register r1,
+ FPURegister r2,
+ int32_t j);
+
+
+ void GenInstrJump(Opcode opcode,
+ uint32_t address);
+
+
+ // Labels.
+ void print(Label* L);
+ void bind_to(Label* L, int pos);
+ void link_to(Label* L, Label* appendix);
+ void next(Label* L);
+
+ friend class RegExpMacroAssemblerMIPS;
+ friend class RelocInfo;
+};
+
+} } // namespace v8::internal
+
+#endif  // V8_MIPS_ASSEMBLER_MIPS_H_
+
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
new file mode 100644
index 000000000..3bd42ed6c
--- /dev/null
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -0,0 +1,109 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ CFunctionId id,
+ BuiltinExtraArguments extra_args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
diff --git a/deps/v8/src/mips/codegen-mips-inl.h b/deps/v8/src/mips/codegen-mips-inl.h
new file mode 100644
index 000000000..2a77715a3
--- /dev/null
+++ b/deps/v8/src/mips/codegen-mips-inl.h
@@ -0,0 +1,56 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_MIPS_CODEGEN_MIPS_INL_H_
+#define V8_MIPS_CODEGEN_MIPS_INL_H_
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Platform-specific inline functions.
+
+void DeferredCode::Jump() { __ b(&entry_label_); }
+
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS_CODEGEN_MIPS_INL_H_
+
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
new file mode 100644
index 000000000..5a27c2864
--- /dev/null
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -0,0 +1,501 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "parser.h"
+#include "register-allocator-inl.h"
+#include "runtime.h"
+#include "scopes.h"
+#include "compiler.h"
+
+
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+
+
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+
+void DeferredCode::SaveRegisters() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void DeferredCode::RestoreRegisters() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator implementation
+
+CodeGenerator::CodeGenerator(MacroAssembler* masm)
+ : deferred_(8),
+ masm_(masm),
+ scope_(NULL),
+ frame_(NULL),
+ allocator_(NULL),
+ cc_reg_(cc_always),
+ state_(NULL),
+ function_return_is_shadowed_(false) {
+}
+
+
+// Calling conventions:
+// s8_fp: caller's frame pointer
+// sp: stack pointer
+// a1: called JS function
+// cp: callee's context
+
+void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitBlock(Block* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitDeclaration(Declaration* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitIfStatement(IfStatement* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitForStatement(ForStatement* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitForInStatement(ForInStatement* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitConditional(Conditional* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitSlot(Slot* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitLiteral(Literal* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitAssignment(Assignment* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitThrow(Throw* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitProperty(Property* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCall(Call* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCallNew(CallNew* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// This should generate code that performs a charCodeAt() call or returns
+// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
+// It is not yet implemented on ARM, so it always goes to the slow case.
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCountOperation(CountOperation* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+#ifdef DEBUG
+bool CodeGenerator::HasValidEntryRegisters() { return true; }
+#endif
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+// On entry a0 and a1 are the things to be compared. On exit v0 is 0,
+// positive or negative to indicate the result of the comparison.
+void CompareStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+ __ break_(0x765);
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+ __ break_(0x790);
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+ __ break_(0x808);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+ UncatchableExceptionType type) {
+ UNIMPLEMENTED_MIPS();
+ __ break_(0x815);
+}
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_termination_exception,
+ Label* throw_out_of_memory_exception,
+ bool do_gc,
+ bool always_allocate) {
+ UNIMPLEMENTED_MIPS();
+ __ break_(0x826);
+}
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+ __ break_(0x831);
+}
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ UNIMPLEMENTED_MIPS();
+  // Load a dummy result.
+  __ li(v0, Operand(0x1234));
+  // Return; the nop fills the branch delay slot.
+  __ jr(ra);
+  __ nop();
+}
+
+
+// This stub performs an instanceof, calling the builtin function if
+// necessary. Uses a1 for the object, a0 for the function that it may
+// be an instance of (these are fetched from the stack).
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+ __ break_(0x845);
+}
+
+
+void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+ __ break_(0x851);
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+ __ break_(0x857);
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+ __ break_(0x863);
+}
+
+
+const char* CompareStub::GetName() {
+ UNIMPLEMENTED_MIPS();
+ return NULL; // UNIMPLEMENTED RETURN
+}
+
+
+int CompareStub::MinorKey() {
+ // Encode the two parameters in a unique 16 bit value.
+  ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 15));
+ return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
new file mode 100644
index 000000000..05138bc64
--- /dev/null
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -0,0 +1,311 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_MIPS_CODEGEN_MIPS_H_
+#define V8_MIPS_CODEGEN_MIPS_H_
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations
+class CompilationInfo;
+class DeferredCode;
+class RegisterAllocator;
+class RegisterFile;
+
+enum InitState { CONST_INIT, NOT_CONST_INIT };
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
+
+// -------------------------------------------------------------------------
+// Code generation state
+
+// The state is passed down the AST by the code generator (and back up, in
+// the form of the state of the label pair). It is threaded through the
+// call stack. Constructing a state implicitly pushes it on the owning code
+// generator's stack of states, and destroying one implicitly pops it.
+
+class CodeGenState BASE_EMBEDDED {
+ public:
+ // Create an initial code generator state. Destroying the initial state
+ // leaves the code generator with a NULL state.
+ explicit CodeGenState(CodeGenerator* owner);
+
+ // Create a code generator state based on a code generator's current
+ // state. The new state has its own typeof state and pair of branch
+ // labels.
+ CodeGenState(CodeGenerator* owner,
+ JumpTarget* true_target,
+ JumpTarget* false_target);
+
+ // Destroy a code generator state and restore the owning code generator's
+ // previous state.
+ ~CodeGenState();
+
+ TypeofState typeof_state() const { return typeof_state_; }
+ JumpTarget* true_target() const { return true_target_; }
+ JumpTarget* false_target() const { return false_target_; }
+
+ private:
+ // The owning code generator.
+ CodeGenerator* owner_;
+
+ // A flag indicating whether we are compiling the immediate subexpression
+ // of a typeof expression.
+ TypeofState typeof_state_;
+
+ JumpTarget* true_target_;
+ JumpTarget* false_target_;
+
+ // The previous state of the owning code generator, restored when
+ // this state is destroyed.
+ CodeGenState* previous_;
+};
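The implicit push/pop described above is plain C++ RAII: the constructor installs the new state as the owner's current state and remembers the previous one, and the destructor restores it. A minimal sketch of how a visitor might use it (the function name is illustrative, not part of this header):

    // Hypothetical visitor helper: thread branch targets through a scoped state.
    void SketchVisitForControl(CodeGenerator* owner, Expression* expr,
                               JumpTarget* if_true, JumpTarget* if_false) {
      CodeGenState state(owner, if_true, if_false);  // Pushes onto the owner.
      // ... emit code for expr; callees read owner->state()->true_target() ...
    }  // ~CodeGenState() pops, restoring the previous state.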
+
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator
+
+class CodeGenerator: public AstVisitor {
+ public:
+ // Compilation mode. Either the compiler is used as the primary
+  // compiler and needs to set up everything or the compiler is used as
+ // the secondary compiler for split compilation and has to handle
+ // bailouts.
+ enum Mode {
+ PRIMARY,
+ SECONDARY
+ };
+
+ // Takes a function literal, generates code for it. This function should only
+ // be called by compiler.cc.
+ static Handle<Code> MakeCode(CompilationInfo* info);
+
+ // Printing of AST, etc. as requested by flags.
+ static void MakeCodePrologue(CompilationInfo* info);
+
+ // Allocate and install the code.
+ static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
+ Code::Flags flags,
+ CompilationInfo* info);
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ static bool ShouldGenerateLog(Expression* type);
+#endif
+
+ static void SetFunctionInfo(Handle<JSFunction> fun,
+ FunctionLiteral* lit,
+ bool is_toplevel,
+ Handle<Script> script);
+
+ static void RecordPositions(MacroAssembler* masm, int pos);
+
+ // Accessors
+ MacroAssembler* masm() { return masm_; }
+ VirtualFrame* frame() const { return frame_; }
+ inline Handle<Script> script();
+
+ bool has_valid_frame() const { return frame_ != NULL; }
+
+ // Set the virtual frame to be new_frame, with non-frame register
+ // reference counts given by non_frame_registers. The non-frame
+ // register reference counts of the old frame are returned in
+ // non_frame_registers.
+ void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
+
+ void DeleteFrame();
+
+ RegisterAllocator* allocator() const { return allocator_; }
+
+ CodeGenState* state() { return state_; }
+ void set_state(CodeGenState* state) { state_ = state; }
+
+ void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
+
+ static const int kUnknownIntValue = -1;
+
+ // Number of instructions used for the JS return sequence. The constant is
+ // used by the debugger to patch the JS return sequence.
+ static const int kJSReturnSequenceLength = 6;
+
+ private:
+ // Construction/Destruction.
+ explicit CodeGenerator(MacroAssembler* masm);
+ virtual ~CodeGenerator() { delete masm_; }
+
+ // Accessors.
+ inline bool is_eval();
+ Scope* scope() const { return scope_; }
+
+ // Generating deferred code.
+ void ProcessDeferred();
+
+ // State
+ bool has_cc() const { return cc_reg_ != cc_always; }
+ TypeofState typeof_state() const { return state_->typeof_state(); }
+ JumpTarget* true_target() const { return state_->true_target(); }
+ JumpTarget* false_target() const { return state_->false_target(); }
+
+  // We don't track loop nesting level on MIPS yet.
+ int loop_nesting() const { return 0; }
+
+ // Node visitors.
+ void VisitStatements(ZoneList<Statement*>* statements);
+
+#define DEF_VISIT(type) \
+ void Visit##type(type* node);
+ AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+ // Main code generation function
+ void Generate(CompilationInfo* info, Mode mode);
+
+ struct InlineRuntimeLUT {
+ void (CodeGenerator::*method)(ZoneList<Expression*>*);
+ const char* name;
+ };
+
+ static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
+ bool CheckForInlineRuntimeCall(CallRuntime* node);
+ static bool PatchInlineRuntimeEntry(Handle<String> name,
+ const InlineRuntimeLUT& new_entry,
+ InlineRuntimeLUT* old_entry);
+
+ static Handle<Code> ComputeLazyCompile(int argc);
+ void ProcessDeclarations(ZoneList<Declaration*>* declarations);
+
+ Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+
+ // Declare global variables and functions in the given array of
+ // name/value pairs.
+ void DeclareGlobals(Handle<FixedArray> pairs);
+
+ // Support for type checks.
+ void GenerateIsSmi(ZoneList<Expression*>* args);
+ void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
+ void GenerateIsArray(ZoneList<Expression*>* args);
+
+ // Support for construct call checks.
+ void GenerateIsConstructCall(ZoneList<Expression*>* args);
+
+ // Support for arguments.length and arguments[?].
+ void GenerateArgumentsLength(ZoneList<Expression*>* args);
+ void GenerateArgumentsAccess(ZoneList<Expression*>* args);
+
+ // Support for accessing the class and value fields of an object.
+ void GenerateClassOf(ZoneList<Expression*>* args);
+ void GenerateValueOf(ZoneList<Expression*>* args);
+ void GenerateSetValueOf(ZoneList<Expression*>* args);
+
+ // Fast support for charCodeAt(n).
+ void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+
+ // Fast support for object equality testing.
+ void GenerateObjectEquals(ZoneList<Expression*>* args);
+
+ void GenerateLog(ZoneList<Expression*>* args);
+
+ // Fast support for Math.random().
+ void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+
+ void GenerateIsObject(ZoneList<Expression*>* args);
+ void GenerateIsFunction(ZoneList<Expression*>* args);
+ void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
+ void GenerateStringAdd(ZoneList<Expression*>* args);
+ void GenerateSubString(ZoneList<Expression*>* args);
+ void GenerateStringCompare(ZoneList<Expression*>* args);
+ void GenerateRegExpExec(ZoneList<Expression*>* args);
+
+ // Fast support for Math.sin and Math.cos.
+ inline void GenerateMathSin(ZoneList<Expression*>* args);
+ inline void GenerateMathCos(ZoneList<Expression*>* args);
+
+ // Simple condition analysis.
+ enum ConditionAnalysis {
+ ALWAYS_TRUE,
+ ALWAYS_FALSE,
+ DONT_KNOW
+ };
+ ConditionAnalysis AnalyzeCondition(Expression* cond);
+
+  // Methods used to record which source positions the generated code
+  // corresponds to. Source positions are collected by the assembler and
+  // emitted with the relocation information.
+ void CodeForFunctionPosition(FunctionLiteral* fun);
+ void CodeForReturnPosition(FunctionLiteral* fun);
+ void CodeForStatementPosition(Statement* node);
+ void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
+ void CodeForSourcePosition(int pos);
+
+#ifdef DEBUG
+ // True if the registers are valid for entry to a block.
+ bool HasValidEntryRegisters();
+#endif
+
+ bool is_eval_; // Tells whether code is generated for eval.
+
+ Handle<Script> script_;
+ List<DeferredCode*> deferred_;
+
+ // Assembler
+ MacroAssembler* masm_; // to generate code
+
+ CompilationInfo* info_;
+
+ // Code generation state
+ Scope* scope_;
+ VirtualFrame* frame_;
+ RegisterAllocator* allocator_;
+ Condition cc_reg_;
+ CodeGenState* state_;
+
+ // Jump targets
+ BreakTarget function_return_;
+
+ // True if the function return is shadowed (ie, jumping to the target
+ // function_return_ does not jump to the true function return, but rather
+ // to some unlinking code).
+ bool function_return_is_shadowed_;
+
+ static InlineRuntimeLUT kInlineRuntimeLUT[];
+
+ friend class VirtualFrame;
+ friend class JumpTarget;
+ friend class Reference;
+ friend class FastCodeGenerator;
+ friend class FullCodeGenSyntaxChecker;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS_CODEGEN_MIPS_H_
+
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc
new file mode 100644
index 000000000..a5ef9f8e6
--- /dev/null
+++ b/deps/v8/src/mips/constants-mips.cc
@@ -0,0 +1,323 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "constants-mips.h"
+
+namespace assembler {
+namespace mips {
+
+namespace v8i = v8::internal;
+
+
+// -----------------------------------------------------------------------------
+// Registers
+
+
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* Registers::names_[kNumSimuRegisters] = {
+ "zero_reg",
+ "at",
+ "v0", "v1",
+ "a0", "a1", "a2", "a3",
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9",
+ "k0", "k1",
+ "gp",
+ "sp",
+ "fp",
+ "ra",
+ "LO", "HI",
+ "pc"
+};
+
+// List of alias names which can be used when referring to MIPS registers.
+const Registers::RegisterAlias Registers::aliases_[] = {
+ {0, "zero"},
+ {23, "cp"},
+ {30, "s8"},
+ {30, "s8_fp"},
+ {kInvalidRegister, NULL}
+};
+
+const char* Registers::Name(int reg) {
+ const char* result;
+ if ((0 <= reg) && (reg < kNumSimuRegisters)) {
+ result = names_[reg];
+ } else {
+ result = "noreg";
+ }
+ return result;
+}
+
+
+int Registers::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumSimuRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].reg != kInvalidRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].reg;
+ }
+ i++;
+ }
+
+  // No register with the requested name found.
+ return kInvalidRegister;
+}
+
+
+const char* FPURegister::names_[kNumFPURegister] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
+ "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
+ "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
+};
+
+// List of alias names which can be used when referring to MIPS registers.
+const FPURegister::RegisterAlias FPURegister::aliases_[] = {
+ {kInvalidRegister, NULL}
+};
+
+const char* FPURegister::Name(int creg) {
+ const char* result;
+ if ((0 <= creg) && (creg < kNumFPURegister)) {
+ result = names_[creg];
+ } else {
+ result = "nocreg";
+ }
+ return result;
+}
+
+
+int FPURegister::Number(const char* name) {
+ // Look through the canonical names.
+  for (int i = 0; i < kNumFPURegister; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].creg != kInvalidRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].creg;
+ }
+ i++;
+ }
+
+  // No Cregister with the requested name found.
+ return kInvalidFPURegister;
+}
+
+
+// -----------------------------------------------------------------------------
+// Instruction
+
+bool Instruction::IsForbiddenInBranchDelay() {
+ int op = OpcodeFieldRaw();
+ switch (op) {
+ case J:
+ case JAL:
+ case BEQ:
+ case BNE:
+ case BLEZ:
+ case BGTZ:
+ case BEQL:
+ case BNEL:
+ case BLEZL:
+ case BGTZL:
+ return true;
+ case REGIMM:
+ switch (RtFieldRaw()) {
+ case BLTZ:
+ case BGEZ:
+ case BLTZAL:
+ case BGEZAL:
+ return true;
+ default:
+ return false;
+ };
+ break;
+ case SPECIAL:
+ switch (FunctionFieldRaw()) {
+ case JR:
+ case JALR:
+ return true;
+ default:
+ return false;
+ };
+ break;
+ default:
+ return false;
+ };
+}
+
+
+bool Instruction::IsLinkingInstruction() {
+ int op = OpcodeFieldRaw();
+ switch (op) {
+ case JAL:
+ case BGEZAL:
+ case BLTZAL:
+ return true;
+ case SPECIAL:
+ switch (FunctionFieldRaw()) {
+ case JALR:
+ return true;
+ default:
+ return false;
+ };
+ default:
+ return false;
+ };
+}
+
+
+bool Instruction::IsTrap() {
+ if (OpcodeFieldRaw() != SPECIAL) {
+ return false;
+ } else {
+ switch (FunctionFieldRaw()) {
+ case BREAK:
+ case TGE:
+ case TGEU:
+ case TLT:
+ case TLTU:
+ case TEQ:
+ case TNE:
+ return true;
+ default:
+ return false;
+ };
+ }
+}
+
+
+Instruction::Type Instruction::InstructionType() const {
+ switch (OpcodeFieldRaw()) {
+ case SPECIAL:
+ switch (FunctionFieldRaw()) {
+ case JR:
+ case JALR:
+ case BREAK:
+ case SLL:
+ case SRL:
+ case SRA:
+ case SLLV:
+ case SRLV:
+ case SRAV:
+ case MFHI:
+ case MFLO:
+ case MULT:
+ case MULTU:
+ case DIV:
+ case DIVU:
+ case ADD:
+ case ADDU:
+ case SUB:
+ case SUBU:
+ case AND:
+ case OR:
+ case XOR:
+ case NOR:
+ case SLT:
+ case SLTU:
+ case TGE:
+ case TGEU:
+ case TLT:
+ case TLTU:
+ case TEQ:
+ case TNE:
+ return kRegisterType;
+ default:
+ UNREACHABLE();
+ };
+ break;
+ case SPECIAL2:
+ switch (FunctionFieldRaw()) {
+ case MUL:
+ return kRegisterType;
+ default:
+ UNREACHABLE();
+ };
+ break;
+ case COP1: // Coprocessor instructions
+ switch (FunctionFieldRaw()) {
+ case BC1: // branch on coprocessor condition
+ return kImmediateType;
+ default:
+ return kRegisterType;
+ };
+ break;
+    // 16-bit immediate type instructions, e.g. addi dest, src, imm16.
+ case REGIMM:
+ case BEQ:
+ case BNE:
+ case BLEZ:
+ case BGTZ:
+ case ADDI:
+ case ADDIU:
+ case SLTI:
+ case SLTIU:
+ case ANDI:
+ case ORI:
+ case XORI:
+ case LUI:
+ case BEQL:
+ case BNEL:
+ case BLEZL:
+ case BGTZL:
+ case LB:
+ case LW:
+ case LBU:
+ case SB:
+ case SW:
+ case LWC1:
+ case LDC1:
+ case SWC1:
+ case SDC1:
+ return kImmediateType;
+    // 26-bit immediate type instructions, e.g. j imm26.
+ case J:
+ case JAL:
+ return kJumpType;
+ default:
+ UNREACHABLE();
+ };
+ return kUnsupported;
+}
+
+} } // namespace assembler::mips
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
new file mode 100644
index 000000000..d0fdf88db
--- /dev/null
+++ b/deps/v8/src/mips/constants-mips.h
@@ -0,0 +1,525 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_CONSTANTS_H_
+#define V8_MIPS_CONSTANTS_H_
+
+#include "checks.h"
+
+// UNIMPLEMENTED_ macro for MIPS.
+#define UNIMPLEMENTED_MIPS() \
+ v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
+ __FILE__, __LINE__, __func__)
+#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
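Note that the macro only logs and falls through; it does not abort. That is why the codegen-mips.cc stubs earlier in this patch pair it with an explicit break_() trap wherever falling through would be unsafe. A minimal illustration (Example is a hypothetical function):

    void Example() {
      // Prints e.g. "<file>, line 42: function Example not implemented."
      // and then simply continues executing.
      UNIMPLEMENTED_MIPS();
    }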
+
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate MIPS32 instructions.
+//
+// See: MIPS32 Architecture For Programmers
+// Volume II: The MIPS32 Instruction Set
+// Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf.
+
+namespace assembler {
+namespace mips {
+
+// -----------------------------------------------------------------------------
+// Registers and FPURegister.
+
+// Number of general purpose registers.
+static const int kNumRegisters = 32;
+static const int kInvalidRegister = -1;
+
+// Number of simulated registers, including LO, HI and pc.
+static const int kNumSimuRegisters = 35;
+
+// In the simulator, the PC register is simulated as the 34th register.
+static const int kPCRegister = 34;
+
+// Number of coprocessor registers.
+static const int kNumFPURegister = 32;
+static const int kInvalidFPURegister = -1;
+
+// Helper functions for converting between register numbers and names.
+class Registers {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int reg;
+ const char *name;
+ };
+
+ static const int32_t kMaxValue = 0x7fffffff;
+ static const int32_t kMinValue = 0x80000000;
+
+ private:
+
+ static const char* names_[kNumSimuRegisters];
+ static const RegisterAlias aliases_[];
+};
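A quick usage sketch of the lookup helpers above (expected values follow the tables in constants-mips.cc; the function name is illustrative):

    void RegisterLookupExample() {
      const char* name = Registers::Name(4);    // "a0"
      int number = Registers::Number("a0");     // 4
      int alias = Registers::Number("zero");    // 0, resolved via the alias table
      int bad = Registers::Number("bogus");     // kInvalidRegister (-1)
    }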
+
+// Helper functions for converting between register numbers and names.
+class FPURegister {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int creg;
+ const char *name;
+ };
+
+ private:
+
+ static const char* names_[kNumFPURegister];
+ static const RegisterAlias aliases_[];
+};
+
+
+// -----------------------------------------------------------------------------
+// Instructions encoding constants.
+
+// On MIPS all instructions are 32 bits.
+typedef int32_t Instr;
+
+typedef unsigned char byte_;
+
+// Special Software Interrupt codes when used in the presence of the MIPS
+// simulator.
+enum SoftwareInterruptCodes {
+ // Transition to C code.
+ call_rt_redirected = 0xfffff
+};
+
+// ----- Fields offset and length.
+static const int kOpcodeShift = 26;
+static const int kOpcodeBits = 6;
+static const int kRsShift = 21;
+static const int kRsBits = 5;
+static const int kRtShift = 16;
+static const int kRtBits = 5;
+static const int kRdShift = 11;
+static const int kRdBits = 5;
+static const int kSaShift = 6;
+static const int kSaBits = 5;
+static const int kFunctionShift = 0;
+static const int kFunctionBits = 6;
+
+static const int kImm16Shift = 0;
+static const int kImm16Bits = 16;
+static const int kImm26Shift = 0;
+static const int kImm26Bits = 26;
+
+static const int kFsShift = 11;
+static const int kFsBits = 5;
+static const int kFtShift = 16;
+static const int kFtBits = 5;
+
+// ----- Miscellaneous useful masks.
+// Instruction bit masks.
+static const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
+static const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
+static const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
+static const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
+static const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
+static const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
+static const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
+static const int kFunctionFieldMask =
+ ((1 << kFunctionBits) - 1) << kFunctionShift;
+// Misc masks.
+static const int HIMask = 0xffff << 16;
+static const int LOMask = 0xffff;
+static const int signMask = 0x80000000;
+
+
+// ----- MIPS Opcodes and Function Fields.
+// We use this presentation to stay close to the table representation in
+// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set.
+enum Opcode {
+ SPECIAL = 0 << kOpcodeShift,
+ REGIMM = 1 << kOpcodeShift,
+
+ J = ((0 << 3) + 2) << kOpcodeShift,
+ JAL = ((0 << 3) + 3) << kOpcodeShift,
+ BEQ = ((0 << 3) + 4) << kOpcodeShift,
+ BNE = ((0 << 3) + 5) << kOpcodeShift,
+ BLEZ = ((0 << 3) + 6) << kOpcodeShift,
+ BGTZ = ((0 << 3) + 7) << kOpcodeShift,
+
+ ADDI = ((1 << 3) + 0) << kOpcodeShift,
+ ADDIU = ((1 << 3) + 1) << kOpcodeShift,
+ SLTI = ((1 << 3) + 2) << kOpcodeShift,
+ SLTIU = ((1 << 3) + 3) << kOpcodeShift,
+ ANDI = ((1 << 3) + 4) << kOpcodeShift,
+ ORI = ((1 << 3) + 5) << kOpcodeShift,
+ XORI = ((1 << 3) + 6) << kOpcodeShift,
+ LUI = ((1 << 3) + 7) << kOpcodeShift,
+
+ COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class
+ BEQL = ((2 << 3) + 4) << kOpcodeShift,
+ BNEL = ((2 << 3) + 5) << kOpcodeShift,
+ BLEZL = ((2 << 3) + 6) << kOpcodeShift,
+ BGTZL = ((2 << 3) + 7) << kOpcodeShift,
+
+ SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
+
+ LB = ((4 << 3) + 0) << kOpcodeShift,
+ LW = ((4 << 3) + 3) << kOpcodeShift,
+ LBU = ((4 << 3) + 4) << kOpcodeShift,
+ SB = ((5 << 3) + 0) << kOpcodeShift,
+ SW = ((5 << 3) + 3) << kOpcodeShift,
+
+ LWC1 = ((6 << 3) + 1) << kOpcodeShift,
+ LDC1 = ((6 << 3) + 5) << kOpcodeShift,
+
+ SWC1 = ((7 << 3) + 1) << kOpcodeShift,
+ SDC1 = ((7 << 3) + 5) << kOpcodeShift
+};
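Putting the opcodes above together with the field masks defined earlier in this file, an instruction word decomposes as follows (the word is hand-assembled for illustration):

    // addiu t0, a0, 8  ==  opcode ADDIU, rs = a0 (4), rt = t0 (8), imm16 = 8.
    int32_t word = ADDIU | (4 << kRsShift) | (8 << kRtShift) | (8 << kImm16Shift);
    int opcode = word & kOpcodeMask;               // == ADDIU
    int rs = (word & kRsFieldMask) >> kRsShift;    // == 4
    int rt = (word & kRtFieldMask) >> kRtShift;    // == 8
    int imm = (word & kImm16Mask) >> kImm16Shift;  // == 8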
+
+enum SecondaryField {
+ // SPECIAL Encoding of Function Field.
+ SLL = ((0 << 3) + 0),
+ SRL = ((0 << 3) + 2),
+ SRA = ((0 << 3) + 3),
+ SLLV = ((0 << 3) + 4),
+ SRLV = ((0 << 3) + 6),
+ SRAV = ((0 << 3) + 7),
+
+ JR = ((1 << 3) + 0),
+ JALR = ((1 << 3) + 1),
+ BREAK = ((1 << 3) + 5),
+
+ MFHI = ((2 << 3) + 0),
+ MFLO = ((2 << 3) + 2),
+
+ MULT = ((3 << 3) + 0),
+ MULTU = ((3 << 3) + 1),
+ DIV = ((3 << 3) + 2),
+ DIVU = ((3 << 3) + 3),
+
+ ADD = ((4 << 3) + 0),
+ ADDU = ((4 << 3) + 1),
+ SUB = ((4 << 3) + 2),
+ SUBU = ((4 << 3) + 3),
+ AND = ((4 << 3) + 4),
+ OR = ((4 << 3) + 5),
+ XOR = ((4 << 3) + 6),
+ NOR = ((4 << 3) + 7),
+
+ SLT = ((5 << 3) + 2),
+ SLTU = ((5 << 3) + 3),
+
+ TGE = ((6 << 3) + 0),
+ TGEU = ((6 << 3) + 1),
+ TLT = ((6 << 3) + 2),
+ TLTU = ((6 << 3) + 3),
+ TEQ = ((6 << 3) + 4),
+ TNE = ((6 << 3) + 6),
+
+ // SPECIAL2 Encoding of Function Field.
+ MUL = ((0 << 3) + 2),
+
+ // REGIMM encoding of rt Field.
+ BLTZ = ((0 << 3) + 0) << 16,
+ BGEZ = ((0 << 3) + 1) << 16,
+ BLTZAL = ((2 << 3) + 0) << 16,
+ BGEZAL = ((2 << 3) + 1) << 16,
+
+ // COP1 Encoding of rs Field.
+ MFC1 = ((0 << 3) + 0) << 21,
+ MFHC1 = ((0 << 3) + 3) << 21,
+ MTC1 = ((0 << 3) + 4) << 21,
+ MTHC1 = ((0 << 3) + 7) << 21,
+ BC1 = ((1 << 3) + 0) << 21,
+ S = ((2 << 3) + 0) << 21,
+ D = ((2 << 3) + 1) << 21,
+ W = ((2 << 3) + 4) << 21,
+ L = ((2 << 3) + 5) << 21,
+ PS = ((2 << 3) + 6) << 21,
+ // COP1 Encoding of Function Field When rs=S.
+ CVT_D_S = ((4 << 3) + 1),
+ CVT_W_S = ((4 << 3) + 4),
+ CVT_L_S = ((4 << 3) + 5),
+ CVT_PS_S = ((4 << 3) + 6),
+ // COP1 Encoding of Function Field When rs=D.
+ CVT_S_D = ((4 << 3) + 0),
+ CVT_W_D = ((4 << 3) + 4),
+ CVT_L_D = ((4 << 3) + 5),
+ // COP1 Encoding of Function Field When rs=W or L.
+ CVT_S_W = ((4 << 3) + 0),
+ CVT_D_W = ((4 << 3) + 1),
+ CVT_S_L = ((4 << 3) + 0),
+ CVT_D_L = ((4 << 3) + 1),
+ // COP1 Encoding of Function Field When rs=PS.
+
+ NULLSF = 0
+};
+
+
+// ----- Emulated conditions.
+// On MIPS we use this enum to abstract from conditional branch instructions.
+// The 'U' prefix is used to specify unsigned comparisons.
+enum Condition {
+ // Any value < 0 is considered no_condition.
+ no_condition = -1,
+
+ overflow = 0,
+ no_overflow = 1,
+ Uless = 2,
+ Ugreater_equal= 3,
+ equal = 4,
+ not_equal = 5,
+ Uless_equal = 6,
+ Ugreater = 7,
+ negative = 8,
+ positive = 9,
+ parity_even = 10,
+ parity_odd = 11,
+ less = 12,
+ greater_equal = 13,
+ less_equal = 14,
+ greater = 15,
+
+ cc_always = 16,
+
+ // aliases
+ carry = Uless,
+ not_carry = Ugreater_equal,
+ zero = equal,
+ eq = equal,
+ not_zero = not_equal,
+ ne = not_equal,
+ sign = negative,
+ not_sign = positive,
+
+ cc_default = no_condition
+};
+
+// ----- Coprocessor conditions.
+enum FPUCondition {
+ F, // False
+ UN, // Unordered
+ EQ, // Equal
+ UEQ, // Unordered or Equal
+ OLT, // Ordered or Less Than
+ ULT, // Unordered or Less Than
+ OLE, // Ordered or Less Than or Equal
+ ULE // Unordered or Less Than or Equal
+};
+
+
+// Break 0xfffff, reserved for redirected real time call.
+const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
+// A nop instruction. (Encoding of sll 0 0 0).
+const Instr nopInstr = 0;
+
+class Instruction {
+ public:
+ enum {
+ kInstructionSize = 4,
+ kInstructionSizeLog2 = 2,
+ // On MIPS PC cannot actually be directly accessed. We behave as if PC was
+    // always the value of the current instruction being executed.
+ kPCReadOffset = 0
+ };
+
+ // Get the raw instruction bits.
+ inline Instr InstructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void SetInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int Bit(int nr) const {
+ return (InstructionBits() >> nr) & 1;
+ }
+
+ // Read a bit field out of the instruction bits.
+ inline int Bits(int hi, int lo) const {
+ return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Instruction type.
+ enum Type {
+ kRegisterType,
+ kImmediateType,
+ kJumpType,
+ kUnsupported = -1
+ };
+
+ // Get the encoding type of the instruction.
+ Type InstructionType() const;
+
+
+ // Accessors for the different named fields used in the MIPS encoding.
+ inline Opcode OpcodeField() const {
+ return static_cast<Opcode>(
+ Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
+ }
+
+ inline int RsField() const {
+ ASSERT(InstructionType() == kRegisterType ||
+ InstructionType() == kImmediateType);
+ return Bits(kRsShift + kRsBits - 1, kRsShift);
+ }
+
+ inline int RtField() const {
+ ASSERT(InstructionType() == kRegisterType ||
+ InstructionType() == kImmediateType);
+ return Bits(kRtShift + kRtBits - 1, kRtShift);
+ }
+
+ inline int RdField() const {
+ ASSERT(InstructionType() == kRegisterType);
+ return Bits(kRdShift + kRdBits - 1, kRdShift);
+ }
+
+ inline int SaField() const {
+ ASSERT(InstructionType() == kRegisterType);
+ return Bits(kSaShift + kSaBits - 1, kSaShift);
+ }
+
+ inline int FunctionField() const {
+ ASSERT(InstructionType() == kRegisterType ||
+ InstructionType() == kImmediateType);
+ return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
+ }
+
+ inline int FsField() const {
+    return Bits(kFsShift + kFsBits - 1, kFsShift);
+ }
+
+ inline int FtField() const {
+    return Bits(kFtShift + kFtBits - 1, kFtShift);
+ }
+
+ // Return the fields at their original place in the instruction encoding.
+ inline Opcode OpcodeFieldRaw() const {
+ return static_cast<Opcode>(InstructionBits() & kOpcodeMask);
+ }
+
+ inline int RsFieldRaw() const {
+ ASSERT(InstructionType() == kRegisterType ||
+ InstructionType() == kImmediateType);
+ return InstructionBits() & kRsFieldMask;
+ }
+
+ inline int RtFieldRaw() const {
+ ASSERT(InstructionType() == kRegisterType ||
+ InstructionType() == kImmediateType);
+ return InstructionBits() & kRtFieldMask;
+ }
+
+ inline int RdFieldRaw() const {
+ ASSERT(InstructionType() == kRegisterType);
+ return InstructionBits() & kRdFieldMask;
+ }
+
+ inline int SaFieldRaw() const {
+ ASSERT(InstructionType() == kRegisterType);
+ return InstructionBits() & kSaFieldMask;
+ }
+
+ inline int FunctionFieldRaw() const {
+ return InstructionBits() & kFunctionFieldMask;
+ }
+
+ // Get the secondary field according to the opcode.
+ inline int SecondaryField() const {
+ Opcode op = OpcodeFieldRaw();
+ switch (op) {
+ case SPECIAL:
+ case SPECIAL2:
+ return FunctionField();
+ case COP1:
+ return RsField();
+ case REGIMM:
+ return RtField();
+ default:
+ return NULLSF;
+ }
+ }
+
+ inline int32_t Imm16Field() const {
+ ASSERT(InstructionType() == kImmediateType);
+ return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
+ }
+
+ inline int32_t Imm26Field() const {
+ ASSERT(InstructionType() == kJumpType);
+    return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
+ }
+
+ // Say if the instruction should not be used in a branch delay slot.
+ bool IsForbiddenInBranchDelay();
+  // Say if the instruction 'links', e.g. jal, bal.
+ bool IsLinkingInstruction();
+ // Say if the instruction is a break or a trap.
+ bool IsTrap();
+
+  // Instructions are read out of a code stream. The only way to get a
+ // reference to an instruction is to convert a pointer. There is no way
+ // to allocate or create instances of class Instruction.
+ // Use the At(pc) function to create references to Instruction.
+ static Instruction* At(byte_* pc) {
+ return reinterpret_cast<Instruction*>(pc);
+ }
+
+ private:
+ // We need to prevent the creation of instances of class Instruction.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
+};
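As the comment inside the class explains, an Instruction is only ever obtained by reinterpreting a pointer into the code stream. A short sketch (the function name and buffer contents are illustrative):

    void InstructionExample(byte_* pc) {
      Instruction* instr = Instruction::At(pc);
      instr->SetInstructionBits(nopInstr);        // sll zero_reg, zero_reg, 0
      bool is_trap = instr->IsTrap();             // false: SLL is not a trap
      Instruction::Type type = instr->InstructionType();  // kRegisterType
    }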
+
+
+// -----------------------------------------------------------------------------
+// MIPS assembly various constants.
+
+static const int kArgsSlotsSize = 4 * Instruction::kInstructionSize;
+static const int kArgsSlotsNum = 4;
+
+static const int kBranchReturnOffset = 2 * Instruction::kInstructionSize;
+
+static const int kDoubleAlignment = 2 * 8;
+static const int kDoubleAlignmentMask = kDoubleAlignment - 1;
+
+
+} } // namespace assembler::mips
+
+#endif // #ifndef V8_MIPS_CONSTANTS_H_
+
diff --git a/deps/v8/src/mips/cpu-mips.cc b/deps/v8/src/mips/cpu-mips.cc
new file mode 100644
index 000000000..f592257e0
--- /dev/null
+++ b/deps/v8/src/mips/cpu-mips.cc
@@ -0,0 +1,69 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// CPU specific code for MIPS independent of OS goes here.
+
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#ifdef __mips
+#include <asm/cachectl.h>
+#endif // #ifdef __mips
+
+#include "v8.h"
+#include "cpu.h"
+
+namespace v8 {
+namespace internal {
+
+void CPU::Setup() {
+ // Nothing to do.
+}
+
+void CPU::FlushICache(void* start, size_t size) {
+#ifdef __mips
+ int res;
+
+ // See http://www.linux-mips.org/wiki/Cacheflush_Syscall
+ res = syscall(__NR_cacheflush, start, size, ICACHE);
+
+ if (res) {
+ V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
+ }
+
+#endif // #ifdef __mips
+}
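Any code that writes or patches generated instructions must flush the affected range before executing it, or stale instructions may be fetched. A hedged sketch (CommitCode is a hypothetical helper, not part of this file):

    // After emitting len bytes of machine code at buf, make the data-cache
    // writes visible to instruction fetch before jumping into buf.
    void CommitCode(void* buf, size_t len) {
      CPU::FlushICache(buf, len);
    }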
+
+
+void CPU::DebugBreak() {
+#ifdef __mips
+ asm volatile("break");
+#endif // #ifdef __mips
+}
+
+} } // namespace v8::internal
+
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
new file mode 100644
index 000000000..772bcc013
--- /dev/null
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -0,0 +1,112 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+ return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// Restore the JS frame exit code.
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// A debug break in the exit code is identified by a call.
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+ return rinfo->IsPatchedReturnSequence();
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+
+
+void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+} } // namespace v8::internal
+
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
new file mode 100644
index 000000000..cab72d1db
--- /dev/null
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -0,0 +1,784 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A Disassembler object is used to disassemble a block of code instruction by
+// instruction. The default implementation of the NameConverter object can be
+// overriden to modify register names or to do symbol lookup on addresses.
+//
+// The example below will disassemble a block of code and print it to stdout.
+//
+// NameConverter converter;
+// Disassembler d(converter);
+// for (byte_* pc = begin; pc < end;) {
+// char buffer[128];
+// buffer[0] = '\0';
+// byte_* prev_pc = pc;
+// pc += d.InstructionDecode(buffer, sizeof buffer, pc);
+// printf("%p %08x %s\n",
+// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
+// }
+//
+// The Disassembler class also has a convenience method to disassemble a block
+// of code into a FILE*, meaning that the above functionality could also be
+// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
+
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#ifndef WIN32
+#include <stdint.h>
+#endif
+
+#include "v8.h"
+
+#include "constants-mips.h"
+#include "disasm.h"
+#include "macro-assembler.h"
+#include "platform.h"
+
+namespace assembler {
+namespace mips {
+
+
+namespace v8i = v8::internal;
+
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// a more informative description.
+class Decoder {
+ public:
+ Decoder(const disasm::NameConverter& converter,
+ v8::internal::Vector<char> out_buffer)
+ : converter_(converter),
+ out_buffer_(out_buffer),
+ out_buffer_pos_(0) {
+ out_buffer_[out_buffer_pos_] = '\0';
+ }
+
+ ~Decoder() {}
+
+ // Writes one disassembled instruction into 'buffer' (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(byte_* instruction);
+
+ private:
+ // Bottleneck functions to print into the out_buffer.
+ void PrintChar(const char ch);
+ void Print(const char* str);
+
+ // Printing of common values.
+ void PrintRegister(int reg);
+ void PrintCRegister(int creg);
+ void PrintRs(Instruction* instr);
+ void PrintRt(Instruction* instr);
+ void PrintRd(Instruction* instr);
+ void PrintFs(Instruction* instr);
+ void PrintFt(Instruction* instr);
+ void PrintFd(Instruction* instr);
+ void PrintSa(Instruction* instr);
+ void PrintFunction(Instruction* instr);
+ void PrintSecondaryField(Instruction* instr);
+ void PrintUImm16(Instruction* instr);
+ void PrintSImm16(Instruction* instr);
+ void PrintXImm16(Instruction* instr);
+ void PrintImm26(Instruction* instr);
+ void PrintCode(Instruction* instr); // For break and trap instructions.
+ // Printing of instruction name.
+ void PrintInstructionName(Instruction* instr);
+
+ // Handle formatting of instructions and their options.
+ int FormatRegister(Instruction* instr, const char* option);
+ int FormatCRegister(Instruction* instr, const char* option);
+ int FormatOption(Instruction* instr, const char* option);
+ void Format(Instruction* instr, const char* format);
+ void Unknown(Instruction* instr);
+
+ // Each of these functions decodes one particular instruction type.
+ void DecodeTypeRegister(Instruction* instr);
+ void DecodeTypeImmediate(Instruction* instr);
+ void DecodeTypeJump(Instruction* instr);
+
+ const disasm::NameConverter& converter_;
+ v8::internal::Vector<char> out_buffer_;
+ int out_buffer_pos_;
+
+ DISALLOW_COPY_AND_ASSIGN(Decoder);
+};
+
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+ (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+
+// Append the ch to the output buffer.
+void Decoder::PrintChar(const char ch) {
+ out_buffer_[out_buffer_pos_++] = ch;
+}
+
+
+// Append the str to the output buffer.
+void Decoder::Print(const char* str) {
+ char cur = *str++;
+ while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ PrintChar(cur);
+ cur = *str++;
+ }
+ out_buffer_[out_buffer_pos_] = 0;
+}
+
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+ Print(converter_.NameOfCPURegister(reg));
+}
+
+
+void Decoder::PrintRs(Instruction* instr) {
+ int reg = instr->RsField();
+ PrintRegister(reg);
+}
+
+
+void Decoder::PrintRt(Instruction* instr) {
+ int reg = instr->RtField();
+ PrintRegister(reg);
+}
+
+
+void Decoder::PrintRd(Instruction* instr) {
+ int reg = instr->RdField();
+ PrintRegister(reg);
+}
+
+
+// Print the Cregister name according to the active name converter.
+void Decoder::PrintCRegister(int creg) {
+ Print(converter_.NameOfXMMRegister(creg));
+}
+
+
+void Decoder::PrintFs(Instruction* instr) {
+ int creg = instr->RsField();
+ PrintCRegister(creg);
+}
+
+
+void Decoder::PrintFt(Instruction* instr) {
+ int creg = instr->RtField();
+ PrintCRegister(creg);
+}
+
+
+void Decoder::PrintFd(Instruction* instr) {
+ int creg = instr->RdField();
+ PrintCRegister(creg);
+}
+
+
+// Print the integer value of the sa field.
+void Decoder::PrintSa(Instruction* instr) {
+ int sa = instr->SaField();
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d", sa);
+}
+
+
+// Print 16-bit unsigned immediate value.
+void Decoder::PrintUImm16(Instruction* instr) {
+ int32_t imm = instr->Imm16Field();
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%u", imm);
+}
+
+
+// Print 16-bit signed immediate value.
+void Decoder::PrintSImm16(Instruction* instr) {
+  int32_t imm = (instr->Imm16Field() << 16) >> 16;  // Sign-extend 16 bits.
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d", imm);
+}
+
+
+// Print 16-bit hexa immediate value.
+void Decoder::PrintXImm16(Instruction* instr) {
+ int32_t imm = instr->Imm16Field();
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "0x%x", imm);
+}
+
+
+// Print 26-bit immediate value.
+void Decoder::PrintImm26(Instruction* instr) {
+ int32_t imm = instr->Imm26Field();
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d", imm);
+}
+
+
+// Print the code field of a break or trap instruction.
+void Decoder::PrintCode(Instruction* instr) {
+ if (instr->OpcodeFieldRaw() != SPECIAL)
+ return; // Not a break or trap instruction.
+ switch (instr->FunctionFieldRaw()) {
+ case BREAK: {
+ int32_t code = instr->Bits(25, 6);
+ out_buffer_pos_ +=
+ v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%05x", code);
+ break;
+ }
+ case TGE:
+ case TGEU:
+ case TLT:
+ case TLTU:
+ case TEQ:
+ case TNE: {
+ int32_t code = instr->Bits(15, 6);
+ out_buffer_pos_ +=
+ v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
+ break;
+ }
+ default: // Not a break or trap instruction.
+ break;
+ };
+}
+
+
+// Printing of instruction name.
+void Decoder::PrintInstructionName(Instruction* instr) {
+  // Intentionally empty: instruction names are emitted via Format().
+}
+
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
+ ASSERT(format[0] == 'r');
+ if (format[1] == 's') { // 'rs: Rs register
+ int reg = instr->RsField();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 't') { // 'rt: rt register
+ int reg = instr->RtField();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'rd: rd register
+ int reg = instr->RdField();
+ PrintRegister(reg);
+ return 2;
+ }
+ UNREACHABLE();
+ return -1;
+}
+
+
+// Handle all Cregister based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatCRegister(Instruction* instr, const char* format) {
+ ASSERT(format[0] == 'f');
+ if (format[1] == 's') { // 'fs: fs register
+ int reg = instr->RsField();
+ PrintCRegister(reg);
+ return 2;
+ } else if (format[1] == 't') { // 'ft: ft register
+ int reg = instr->RtField();
+ PrintCRegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'fd: fd register
+ int reg = instr->RdField();
+ PrintCRegister(reg);
+ return 2;
+ }
+ UNREACHABLE();
+ return -1;
+}
+
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instructions. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller.) FormatOption returns the number of
+// characters that were consumed from the formatting string.
+int Decoder::FormatOption(Instruction* instr, const char* format) {
+ switch (format[0]) {
+ case 'c': { // 'code for break or trap instructions
+ ASSERT(STRING_STARTS_WITH(format, "code"));
+ PrintCode(instr);
+ return 4;
+ }
+ case 'i': { // 'imm16u or 'imm26
+ if (format[3] == '1') {
+ ASSERT(STRING_STARTS_WITH(format, "imm16"));
+ if (format[5] == 's') {
+ ASSERT(STRING_STARTS_WITH(format, "imm16s"));
+ PrintSImm16(instr);
+ } else if (format[5] == 'u') {
+ ASSERT(STRING_STARTS_WITH(format, "imm16u"));
+        PrintUImm16(instr);
+ } else {
+ ASSERT(STRING_STARTS_WITH(format, "imm16x"));
+ PrintXImm16(instr);
+ }
+ return 6;
+ } else {
+ ASSERT(STRING_STARTS_WITH(format, "imm26"));
+ PrintImm26(instr);
+ return 5;
+ }
+ }
+ case 'r': { // 'r: registers
+ return FormatRegister(instr, format);
+ }
+ case 'f': { // 'f: Cregisters
+ return FormatCRegister(instr, format);
+ }
+ case 's': { // 'sa
+ ASSERT(STRING_STARTS_WITH(format, "sa"));
+ PrintSa(instr);
+ return 2;
+ }
+ };
+ UNREACHABLE();
+ return -1;
+}
+
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
+void Decoder::Format(Instruction* instr, const char* format) {
+ char cur = *format++;
+ while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ if (cur == '\'') { // Single quote is used as the formatting escape.
+ format += FormatOption(instr, format);
+ } else {
+ out_buffer_[out_buffer_pos_++] = cur;
+ }
+ cur = *format++;
+ }
+ out_buffer_[out_buffer_pos_] = '\0';
+}
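To make the escape convention concrete, here is the flow for one format string (ExampleAddiu is a hypothetical helper, and the output assumes rt = t0, rs = a0, imm16 = 8):

    // Plain characters ("addiu ", ", ") are copied verbatim; each ' escape is
    // handed to FormatOption: 'rt and 'rs go through FormatRegister, 'imm16s
    // goes through PrintSImm16.
    void Decoder::ExampleAddiu(Instruction* instr) {
      Format(instr, "addiu 'rt, 'rs, 'imm16s");
      // out_buffer_ now holds "addiu t0, a0, 8".
    }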
+
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr)
+// which will just print "unknown" for the instruction.
+void Decoder::Unknown(Instruction* instr) {
+ Format(instr, "unknown");
+}
+
+
+void Decoder::DecodeTypeRegister(Instruction* instr) {
+ switch (instr->OpcodeFieldRaw()) {
+ case COP1: // Coprocessor instructions
+ switch (instr->RsFieldRaw()) {
+ case BC1: // branch on coprocessor condition
+ UNREACHABLE();
+ break;
+ case MFC1:
+ Format(instr, "mfc1 'rt, 'fs");
+ break;
+ case MFHC1:
+          Format(instr, "mfhc1 'rt, 'fs");
+ break;
+ case MTC1:
+ Format(instr, "mtc1 'rt, 'fs");
+ break;
+ case MTHC1:
+          Format(instr, "mthc1 'rt, 'fs");
+ break;
+ case S:
+ case D:
+ UNIMPLEMENTED_MIPS();
+ break;
+ case W:
+ switch (instr->FunctionFieldRaw()) {
+ case CVT_S_W:
+ UNIMPLEMENTED_MIPS();
+ break;
+ case CVT_D_W: // Convert word to double.
+ Format(instr, "cvt.d.w 'fd, 'fs");
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break;
+ case L:
+ case PS:
+ UNIMPLEMENTED_MIPS();
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break;
+ case SPECIAL:
+ switch (instr->FunctionFieldRaw()) {
+ case JR:
+ Format(instr, "jr 'rs");
+ break;
+ case JALR:
+ Format(instr, "jalr 'rs");
+ break;
+ case SLL:
+          if (0x0 == static_cast<int>(instr->InstructionBits()))
+ Format(instr, "nop");
+ else
+ Format(instr, "sll 'rd, 'rt, 'sa");
+ break;
+ case SRL:
+ Format(instr, "srl 'rd, 'rt, 'sa");
+ break;
+ case SRA:
+ Format(instr, "sra 'rd, 'rt, 'sa");
+ break;
+ case SLLV:
+ Format(instr, "sllv 'rd, 'rt, 'rs");
+ break;
+ case SRLV:
+ Format(instr, "srlv 'rd, 'rt, 'rs");
+ break;
+ case SRAV:
+ Format(instr, "srav 'rd, 'rt, 'rs");
+ break;
+ case MFHI:
+ Format(instr, "mfhi 'rd");
+ break;
+ case MFLO:
+ Format(instr, "mflo 'rd");
+ break;
+ case MULT:
+ Format(instr, "mult 'rs, 'rt");
+ break;
+ case MULTU:
+ Format(instr, "multu 'rs, 'rt");
+ break;
+ case DIV:
+ Format(instr, "div 'rs, 'rt");
+ break;
+ case DIVU:
+ Format(instr, "divu 'rs, 'rt");
+ break;
+ case ADD:
+ Format(instr, "add 'rd, 'rs, 'rt");
+ break;
+ case ADDU:
+ Format(instr, "addu 'rd, 'rs, 'rt");
+ break;
+ case SUB:
+ Format(instr, "sub 'rd, 'rs, 'rt");
+ break;
+ case SUBU:
+ Format(instr, "sub 'rd, 'rs, 'rt");
+ break;
+ case AND:
+ Format(instr, "and 'rd, 'rs, 'rt");
+ break;
+ case OR:
+ if (0 == instr->RsField()) {
+ Format(instr, "mov 'rd, 'rt");
+ } else if (0 == instr->RtField()) {
+ Format(instr, "mov 'rd, 'rs");
+ } else {
+ Format(instr, "or 'rd, 'rs, 'rt");
+ }
+ break;
+ case XOR:
+ Format(instr, "xor 'rd, 'rs, 'rt");
+ break;
+ case NOR:
+ Format(instr, "nor 'rd, 'rs, 'rt");
+ break;
+ case SLT:
+ Format(instr, "slt 'rd, 'rs, 'rt");
+ break;
+ case SLTU:
+ Format(instr, "sltu 'rd, 'rs, 'rt");
+ break;
+ case BREAK:
+ Format(instr, "break, code: 'code");
+ break;
+ case TGE:
+ Format(instr, "tge 'rs, 'rt, code: 'code");
+ break;
+ case TGEU:
+ Format(instr, "tgeu 'rs, 'rt, code: 'code");
+ break;
+ case TLT:
+ Format(instr, "tlt 'rs, 'rt, code: 'code");
+ break;
+ case TLTU:
+ Format(instr, "tltu 'rs, 'rt, code: 'code");
+ break;
+ case TEQ:
+ Format(instr, "teq 'rs, 'rt, code: 'code");
+ break;
+ case TNE:
+ Format(instr, "tne 'rs, 'rt, code: 'code");
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break;
+ case SPECIAL2:
+ switch (instr->FunctionFieldRaw()) {
+ case MUL:
+ Format(instr, "mul 'rd, 'rs, 'rt");
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break;
+ default:
+ UNREACHABLE();
+ };
+}
+
+
+void Decoder::DecodeTypeImmediate(Instruction* instr) {
+ switch (instr->OpcodeFieldRaw()) {
+ // ------------- REGIMM class.
+ case REGIMM:
+ switch (instr->RtFieldRaw()) {
+ case BLTZ:
+ Format(instr, "bltz 'rs, 'imm16u");
+ break;
+ case BLTZAL:
+ Format(instr, "bltzal 'rs, 'imm16u");
+ break;
+ case BGEZ:
+ Format(instr, "bgez 'rs, 'imm16u");
+ break;
+ case BGEZAL:
+ Format(instr, "bgezal 'rs, 'imm16u");
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break; // case REGIMM
+ // ------------- Branch instructions.
+ case BEQ:
+ Format(instr, "beq 'rs, 'rt, 'imm16u");
+ break;
+ case BNE:
+ Format(instr, "bne 'rs, 'rt, 'imm16u");
+ break;
+ case BLEZ:
+ Format(instr, "blez 'rs, 'imm16u");
+ break;
+ case BGTZ:
+ Format(instr, "bgtz 'rs, 'imm16u");
+ break;
+ // ------------- Arithmetic instructions.
+ case ADDI:
+ Format(instr, "addi 'rt, 'rs, 'imm16s");
+ break;
+ case ADDIU:
+ Format(instr, "addiu 'rt, 'rs, 'imm16s");
+ break;
+ case SLTI:
+ Format(instr, "slti 'rt, 'rs, 'imm16s");
+ break;
+ case SLTIU:
+ Format(instr, "sltiu 'rt, 'rs, 'imm16u");
+ break;
+ case ANDI:
+ Format(instr, "andi 'rt, 'rs, 'imm16x");
+ break;
+ case ORI:
+ Format(instr, "ori 'rt, 'rs, 'imm16x");
+ break;
+ case XORI:
+ Format(instr, "xori 'rt, 'rs, 'imm16x");
+ break;
+ case LUI:
+ Format(instr, "lui 'rt, 'imm16x");
+ break;
+ // ------------- Memory instructions.
+ case LB:
+ Format(instr, "lb 'rt, 'imm16s('rs)");
+ break;
+ case LW:
+ Format(instr, "lw 'rt, 'imm16s('rs)");
+ break;
+ case LBU:
+ Format(instr, "lbu 'rt, 'imm16s('rs)");
+ break;
+ case SB:
+ Format(instr, "sb 'rt, 'imm16s('rs)");
+ break;
+ case SW:
+ Format(instr, "sw 'rt, 'imm16s('rs)");
+ break;
+ case LWC1:
+ Format(instr, "lwc1 'ft, 'imm16s('rs)");
+ break;
+ case LDC1:
+ Format(instr, "ldc1 'ft, 'imm16s('rs)");
+ break;
+ case SWC1:
+ Format(instr, "swc1 'ft, 'imm16s('rs)");
+ break;
+ case SDC1:
+ Format(instr, "sdc1 'ft, 'imm16s('rs)");
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ };
+}
+
+
+void Decoder::DecodeTypeJump(Instruction* instr) {
+ switch (instr->OpcodeFieldRaw()) {
+ case J:
+ Format(instr, "j 'imm26");
+ break;
+ case JAL:
+ Format(instr, "jal 'imm26");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+// Disassemble the instruction at *instr_ptr into the output buffer.
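+// Each output line begins with the raw instruction word; e.g. an addu might
+// decode as "00851021 addu v0, a0, a1" (illustrative only).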
+int Decoder::InstructionDecode(byte_* instr_ptr) {
+ Instruction* instr = Instruction::At(instr_ptr);
+ // Print raw instruction bytes.
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%08x ",
+ instr->InstructionBits());
+ switch (instr->InstructionType()) {
+ case Instruction::kRegisterType: {
+ DecodeTypeRegister(instr);
+ break;
+ }
+ case Instruction::kImmediateType: {
+ DecodeTypeImmediate(instr);
+ break;
+ }
+ case Instruction::kJumpType: {
+ DecodeTypeJump(instr);
+ break;
+ }
+ default: {
+ UNSUPPORTED_MIPS();
+ }
+ }
+ return Instruction::kInstructionSize;
+}
+
+
+} } // namespace assembler::mips
+
+
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+namespace v8i = v8::internal;
+
+
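+// Note: this formats into a single static buffer, so the returned pointer is
+// only valid until the next call, and the function is not reentrant.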
+const char* NameConverter::NameOfAddress(byte_* addr) const {
+ static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
+ v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
+ return tmp_buffer.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte_* addr) const {
+ return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ return assembler::mips::Registers::Name(reg);
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+ // MIPS has no XMM registers; this interface slot maps to the FPU registers.
+ return assembler::mips::FPURegister::Name(reg);
+}
+
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+ UNREACHABLE(); // MIPS does not have the concept of a byte register.
+ return "nobytereg";
+}
+
+
+const char* NameConverter::NameInCode(byte_* addr) const {
+ // The default name converter is called for unknown code, so we do not try
+ // to access any memory.
+ return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+ : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+ byte_* instruction) {
+ assembler::mips::Decoder d(converter_, buffer);
+ return d.InstructionDecode(instruction);
+}
+
+
+int Disassembler::ConstantPoolSizeAt(byte_* instruction) {
+ UNIMPLEMENTED_MIPS();
+ return -1;
+}
+
+
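+// Convenience driver: decode every instruction in [begin, end) and print one
+// line per instruction to 'f'. A minimal usage sketch, assuming Disassemble
+// is declared static in the shared disasm.h interface (its self-contained
+// body suggests so) and that 'code' points at 'size' bytes of MIPS code:
+//
+//   disasm::Disassembler::Disassemble(stdout, code, code + size);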
+void Disassembler::Disassemble(FILE* f, byte_* begin, byte_* end) {
+ NameConverter converter;
+ Disassembler d(converter);
+ for (byte_* pc = begin; pc < end;) {
+ v8::internal::EmbeddedVector<char, 128> buffer;
+ buffer[0] = '\0';
+ byte_* prev_pc = pc;
+ pc += d.InstructionDecode(buffer, pc);
+ fprintf(f, "%p %08x %s\n",
+ prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ }
+}
+
+#undef UNSUPPORTED
+
+} // namespace disasm
+
diff --git a/deps/v8/src/mips/fast-codegen-mips.cc b/deps/v8/src/mips/fast-codegen-mips.cc
new file mode 100644
index 000000000..c47f6326d
--- /dev/null
+++ b/deps/v8/src/mips/fast-codegen-mips.cc
@@ -0,0 +1,56 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "fast-codegen.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+void FastCodeGenerator::Generate(CompilationInfo* info) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
+ UNIMPLEMENTED_MIPS();
+}
+
+#undef __
+
+
+} } // namespace v8::internal
+
diff --git a/deps/v8/src/mips/frames-mips.cc b/deps/v8/src/mips/frames-mips.cc
new file mode 100644
index 000000000..d2c717ce3
--- /dev/null
+++ b/deps/v8/src/mips/frames-mips.cc
@@ -0,0 +1,100 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+
+#include "frames-inl.h"
+#include "mips/assembler-mips-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+StackFrame::Type StackFrame::ComputeType(State* state) {
+ ASSERT(state->fp != NULL);
+ if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+ return ARGUMENTS_ADAPTOR;
+ }
+ // The marker and function offsets overlap. If the marker isn't a
+ // smi then the frame is a JavaScript frame -- and the marker is
+ // really the function.
+ const int offset = StandardFrameConstants::kMarkerOffset;
+ Object* marker = Memory::Object_at(state->fp + offset);
+ if (!marker->IsSmi()) return JAVA_SCRIPT;
+ return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+}
+
+
+StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
+ if (fp == 0) return NONE;
+ // Compute frame type and stack pointer.
+ Address sp = fp + ExitFrameConstants::kSPDisplacement;
+ const int offset = ExitFrameConstants::kCodeOffset;
+ Object* code = Memory::Object_at(fp + offset);
+ bool is_debug_exit = code->IsSmi();
+ if (is_debug_exit) {
+ sp -= kNumJSCallerSaved * kPointerSize;
+ }
+ // Fill in the state.
+ state->sp = sp;
+ state->fp = fp;
+ state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+ return EXIT;
+}
+
+
+void ExitFrame::Iterate(ObjectVisitor* v) const {
+ // Do nothing
+}
+
+
+int JavaScriptFrame::GetProvidedParametersCount() const {
+ return ComputeParametersCount();
+}
+
+
+Address JavaScriptFrame::GetCallerStackPointer() const {
+ UNIMPLEMENTED_MIPS();
+ return static_cast<Address>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+ UNIMPLEMENTED_MIPS();
+ return static_cast<Address>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Address InternalFrame::GetCallerStackPointer() const {
+ UNIMPLEMENTED_MIPS();
+ return static_cast<Address>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+} } // namespace v8::internal
+
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
new file mode 100644
index 000000000..ec1949d66
--- /dev/null
+++ b/deps/v8/src/mips/frames-mips.h
@@ -0,0 +1,164 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#ifndef V8_MIPS_FRAMES_MIPS_H_
+#define V8_MIPS_FRAMES_MIPS_H_
+
+
+namespace v8 {
+namespace internal {
+
+// Register lists.
+// Note that the bit values must match those used in actual instruction
+// encoding.
+static const int kNumRegs = 32;
+
+static const RegList kJSCallerSaved =
+ 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7; // a3
+
+static const int kNumJSCallerSaved = 4;
+
+
+// Return the code of the n-th caller-saved register available to JavaScript,
+// e.g. JSCallerSavedCode(0) returns a0.code() == 4.
+int JSCallerSavedCode(int n);
+
+
+// Callee-saved registers preserved when switching from C to JavaScript.
+static const RegList kCalleeSaved =
+ // Saved temporaries: s0..s7.
+ 1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 |
+ 1 << 20 | 1 << 21 | 1 << 22 | 1 << 23 |
+ // gp, sp, fp
+ 1 << 28 | 1 << 29 | 1 << 30;
+
+static const int kNumCalleeSaved = 11;
+
+
+typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
+
+
+// ----------------------------------------------------
+
+class StackHandlerConstants : public AllStatic {
+ public:
+ static const int kNextOffset = 0 * kPointerSize;
+ static const int kStateOffset = 1 * kPointerSize;
+ static const int kFPOffset = 2 * kPointerSize;
+ static const int kPCOffset = 3 * kPointerSize;
+
+ static const int kSize = kPCOffset + kPointerSize;
+};
+
+
+class EntryFrameConstants : public AllStatic {
+ public:
+ static const int kCallerFPOffset = -3 * kPointerSize;
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+ // Exit frames have a debug marker on the stack.
+ static const int kSPDisplacement = -1 * kPointerSize;
+
+ // The debug marker is just above the frame pointer.
+ static const int kDebugMarkOffset = -1 * kPointerSize;
+ // Alias for kDebugMarkOffset, added during the upgrade; the two must stay
+ // equal.
+ static const int kCodeOffset = -1 * kPointerSize;
+
+ static const int kSavedRegistersOffset = 0 * kPointerSize;
+
+ // The caller fields are below the frame pointer on the stack.
+ static const int kCallerFPOffset = +0 * kPointerSize;
+ // The calling JS function is between FP and PC.
+ static const int kCallerPCOffset = +1 * kPointerSize;
+
+ // FP-relative displacement of the caller's SP.
+ static const int kCallerSPDisplacement = +4 * kPointerSize;
+};
+
+
+class StandardFrameConstants : public AllStatic {
+ public:
+ static const int kExpressionsOffset = -3 * kPointerSize;
+ static const int kMarkerOffset = -2 * kPointerSize;
+ static const int kContextOffset = -1 * kPointerSize;
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ static const int kCallerPCOffset = +1 * kPointerSize;
+ static const int kCallerSPOffset = +2 * kPointerSize;
+
+ // Size of the four 32-bit argument slots required by the MIPS ABI.
+ // kRArgsSlotsSize is just a shorter alias for kRegularArgsSlotsSize; use
+ // the short name from now on.
+ static const int kRArgsSlotsSize = 4 * kPointerSize;
+ static const int kRegularArgsSlotsSize = kRArgsSlotsSize;
+
+ // C/C++ argument slots size.
+ static const int kCArgsSlotsSize = 4 * kPointerSize;
+ // JS argument slots size.
+ static const int kJSArgsSlotsSize = 0 * kPointerSize;
+};
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+ static const int kSavedRegistersOffset = +2 * kPointerSize;
+ static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+
+ // Caller SP-relative.
+ static const int kParam0Offset = -2 * kPointerSize;
+ static const int kReceiverOffset = -1 * kPointerSize;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+ static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+ const int offset = JavaScriptFrameConstants::kFunctionOffset;
+ return Memory::Object_at(fp() + offset);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS_FRAMES_MIPS_H_
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
new file mode 100644
index 000000000..920329eea
--- /dev/null
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -0,0 +1,268 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "compiler.h"
+#include "debug.h"
+#include "full-codegen.h"
+#include "parser.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitReturnSequence(int position) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::ApplyTOS(Expression::Context context) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::DropAndApply(int count,
+ Expression::Context context,
+ Register reg) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context,
+ Label* materialize_true,
+ Label* materialize_false) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::DoTest(Expression::Context context) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+ UNIMPLEMENTED_MIPS();
+ return MemOperand(zero_reg, 0); // UNIMPLEMENTED RETURN
+}
+
+
+void FullCodeGenerator::Move(Register destination, Slot* source) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::Move(Slot* dst,
+ Register src,
+ Register scratch1,
+ Register scratch2) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(Variable* var,
+ Expression::Context context) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+ Expression::Context context) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+ Expression::Context context) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitCallWithIC(Call* expr,
+ Handle<Object> ignored,
+ RelocInfo::Mode mode) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitCall(Call* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+Register FullCodeGenerator::result_register() { return v0; }
+
+
+Register FullCodeGenerator::context_register() { return cp; }
+
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FullCodeGenerator::EnterFinallyBlock() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::ExitFinallyBlock() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
new file mode 100644
index 000000000..5598cdfcd
--- /dev/null
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -0,0 +1,187 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// Defined in ic.cc.
+Object* CallIC_Miss(Arguments args);
+
+void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ UNIMPLEMENTED_MIPS();
+}
+
+// Defined in ic.cc.
+Object* LoadIC_Miss(Arguments args);
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void LoadIC::ClearInlinedVersion(Address address) {}
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+ return false;
+}
+
+void KeyedLoadIC::ClearInlinedVersion(Address address) {}
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+ return false;
+}
+
+void KeyedStoreIC::ClearInlinedVersion(Address address) {}
+void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+ return false;
+}
+
+
+Object* KeyedLoadIC_Miss(Arguments args);
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+}
+
+
+void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
+ ExternalArrayType array_type) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedStoreIC::Generate(MacroAssembler* masm,
+ const ExternalReference& f) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
+ ExternalArrayType array_type) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+#undef __
+
+} } // namespace v8::internal
+
diff --git a/deps/v8/src/mips/jump-target-mips.cc b/deps/v8/src/mips/jump-target-mips.cc
new file mode 100644
index 000000000..3301d1991
--- /dev/null
+++ b/deps/v8/src/mips/jump-target-mips.cc
@@ -0,0 +1,87 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "jump-target-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+#define __ ACCESS_MASM(cgen()->masm())
+
+void JumpTarget::DoJump() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void JumpTarget::DoBranch(Condition cc, Hint ignored) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void JumpTarget::Call() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void JumpTarget::DoBind() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void BreakTarget::Jump() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void BreakTarget::Jump(Result* arg) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void BreakTarget::Bind() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void BreakTarget::Bind(Result* arg) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
+
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
new file mode 100644
index 000000000..b733bdd92
--- /dev/null
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -0,0 +1,895 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+
+namespace v8 {
+namespace internal {
+
+MacroAssembler::MacroAssembler(void* buffer, int size)
+ : Assembler(buffer, size),
+ unresolved_(0),
+ generating_stub_(false),
+ allow_stub_calls_(true),
+ code_object_(Heap::undefined_value()) {
+}
+
+
+void MacroAssembler::Jump(Register target, Condition cond,
+ Register r1, const Operand& r2) {
+ Jump(Operand(target), cond, r1, r2);
+}
+
+
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+ Condition cond, Register r1, const Operand& r2) {
+ Jump(Operand(target), cond, r1, r2);
+}
+
+
+void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
+ Condition cond, Register r1, const Operand& r2) {
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond, Register r1, const Operand& r2) {
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+}
+
+
+void MacroAssembler::Call(Register target,
+ Condition cond, Register r1, const Operand& r2) {
+ Call(Operand(target), cond, r1, r2);
+}
+
+
+void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
+ Condition cond, Register r1, const Operand& r2) {
+ Call(Operand(target), cond, r1, r2);
+}
+
+
+void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
+ Condition cond, Register r1, const Operand& r2) {
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
+}
+
+
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond, Register r1, const Operand& r2) {
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
+}
+
+
+void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) {
+ Jump(Operand(ra), cond, r1, r2);
+}
+
+
+void MacroAssembler::LoadRoot(Register destination,
+ Heap::RootListIndex index) {
+ // The root list is addressed relative to register s4.
+ lw(destination, MemOperand(s4, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::LoadRoot(Register destination,
+ Heap::RootListIndex index,
+ Condition cond,
+ Register src1, const Operand& src2) {
+ Branch(NegateCondition(cond), 2, src1, src2);
+ nop();
+ lw(destination, MemOperand(s4, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::RecordWrite(Register object, Register offset,
+ Register scratch) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// ---------------------------------------------------------------------------
+// Instruction macros
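+//
+// Each macro accepts either a register or an immediate right operand: a
+// register is used directly, a small immediate uses the instruction's
+// immediate form, and anything else is first materialized into at via li.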
+
+void MacroAssembler::Add(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ add(rd, rs, rt.rm());
+ } else {
+ if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ addi(rd, rs, rt.imm32_);
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ add(rd, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ addu(rd, rs, rt.rm());
+ } else {
+ if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ addiu(rd, rs, rt.imm32_);
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ addu(rd, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ mul(rd, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ mul(rd, rs, at);
+ }
+}
+
+
+void MacroAssembler::Mult(Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ mult(rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ mult(rs, at);
+ }
+}
+
+
+void MacroAssembler::Multu(Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ multu(rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ multu(rs, at);
+ }
+}
+
+
+void MacroAssembler::Div(Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ div(rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ div(rs, at);
+ }
+}
+
+
+void MacroAssembler::Divu(Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ divu(rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ divu(rs, at);
+ }
+}
+
+
+void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ and_(rd, rs, rt.rm());
+ } else {
+ if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ andi(rd, rs, rt.imm32_);
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ and_(rd, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ or_(rd, rs, rt.rm());
+ } else {
+ if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ ori(rd, rs, rt.imm32_);
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ or_(rd, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ xor_(rd, rs, rt.rm());
+ } else {
+ if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ xori(rd, rs, rt.imm32_);
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ xor_(rd, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ nor(rd, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ nor(rd, rs, at);
+ }
+}
+
+
+void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ slt(rd, rs, rt.rm());
+ } else {
+ if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ slti(rd, rs, rt.imm32_);
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ slt(rd, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ sltu(rd, rs, rt.rm());
+ } else {
+ if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ sltiu(rd, rs, rt.imm32_);
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ sltu(rd, rs, at);
+ }
+ }
+}
+
+
+//------------Pseudo-instructions-------------
+
+// Computes rd = ~rt: xor against an all-ones register is a bitwise NOT.
+void MacroAssembler::movn(Register rd, Register rt) {
+ addiu(at, zero_reg, -1); // Fill at with ones.
+ xor_(rd, rt, at);
+}
+
+
+// Load a 32-bit immediate (word) into a register.
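+// For example, li(t0, Operand(0x12345678)) without relocation info emits
+//   lui t0, 0x1234
+//   ori t0, t0, 0x5678
+// while values that fit in 16 bits collapse to a single addiu, ori or lui.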
+void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
+ ASSERT(!j.is_reg());
+
+ if (!MustUseAt(j.rmode_) && !gen2instr) {
+ // Normal load of an immediate value which does not need Relocation Info.
+ if (is_int16(j.imm32_)) {
+ addiu(rd, zero_reg, j.imm32_);
+ } else if (!(j.imm32_ & HIMask)) {
+ ori(rd, zero_reg, j.imm32_);
+ } else if (!(j.imm32_ & LOMask)) {
+ lui(rd, (HIMask & j.imm32_) >> 16);
+ } else {
+ lui(rd, (HIMask & j.imm32_) >> 16);
+ ori(rd, rd, (LOMask & j.imm32_));
+ }
+ } else if (MustUseAt(j.rmode_) || gen2instr) {
+ if (MustUseAt(j.rmode_)) {
+ RecordRelocInfo(j.rmode_, j.imm32_);
+ }
+ // We always emit the same number of instructions, as this code may later be
+ // patched with another value that needs two instructions to load.
+ if (is_int16(j.imm32_)) {
+ nop();
+ addiu(rd, zero_reg, j.imm32_);
+ } else if (!(j.imm32_ & HIMask)) {
+ nop();
+ ori(rd, zero_reg, j.imm32_);
+ } else if (!(j.imm32_ & LOMask)) {
+ nop();
+ lui(rd, (HIMask & j.imm32_) >> 16);
+ } else {
+ lui(rd, (HIMask & j.imm32_) >> 16);
+ ori(rd, rd, (LOMask & j.imm32_));
+ }
+ }
+}
+
+
+// Exception-generating instructions and debugging support
+void MacroAssembler::stop(const char* msg) {
+ // TO_UPGRADE: just a break instruction for now; it could be made smarter.
+ // We use the 0x54321 value to be able to find it easily when reading memory.
+ break_(0x54321);
+}
+
+
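+// Push the registers in 'regs' as one block, higher-numbered registers
+// closer to sp. For example, MultiPush((1 << 4) | (1 << 6)) reserves 8 bytes
+// and stores a2 (reg 6) at sp+0 and a0 (reg 4) at sp+4.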
+void MacroAssembler::MultiPush(RegList regs) {
+ int16_t NumSaved = 0;
+ int16_t NumToPush = NumberOfBitsSet(regs);
+
+ addiu(sp, sp, -4 * NumToPush);
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs & (1 << i)) != 0) {
+ sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
+ }
+ }
+}
+
+
+void MacroAssembler::MultiPushReversed(RegList regs) {
+ int16_t NumSaved = 0;
+ int16_t NumToPush = NumberOfBitsSet(regs);
+
+ addiu(sp, sp, -4 * NumToPush);
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs & (1 << i)) != 0) {
+ sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
+ }
+ }
+}
+
+
+void MacroAssembler::MultiPop(RegList regs) {
+ int16_t NumSaved = 0;
+
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs & (1 << i)) != 0) {
+ lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
+ }
+ }
+ addiu(sp, sp, 4 * NumSaved);
+}
+
+
+void MacroAssembler::MultiPopReversed(RegList regs) {
+ int16_t NumSaved = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs & (1 << i)) != 0) {
+ lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
+ }
+ }
+ addiu(sp, sp, 4 * NumSaved);
+}
+
+
+// Emulated conditional branches do not emit a nop in the branch delay slot.
+
+// Trashes the at register if no scratch register is provided.
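+// For example, Branch(greater, offset, a0, Operand(a1)) emits
+//   slt at, a1, a0
+//   bne at, zero_reg, offset
+// and the caller supplies the delay-slot nop.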
+void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
+ const Operand& rt, Register scratch) {
+ Register r2;
+ if (rt.is_reg()) {
+ // We don't want any other register but scratch clobbered.
+ ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
+ r2 = rt.rm_;
+ } else if (cond != cc_always) {
+ // We don't want any other register but scratch clobbered.
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ }
+
+ switch (cond) {
+ case cc_always:
+ b(offset);
+ break;
+ case eq:
+ beq(rs, r2, offset);
+ break;
+ case ne:
+ bne(rs, r2, offset);
+ break;
+
+ // Signed comparison
+ case greater:
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ break;
+ case greater_equal:
+ slt(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ break;
+ case less:
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ break;
+ case less_equal:
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ break;
+ case Ugreater_equal:
+ sltu(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ break;
+ case Uless:
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ break;
+ case Uless_equal:
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::Branch(Condition cond, Label* L, Register rs,
+ const Operand& rt, Register scratch) {
+ Register r2;
+ if (rt.is_reg()) {
+ r2 = rt.rm_;
+ } else if (cond != cc_always) {
+ r2 = scratch;
+ li(r2, rt);
+ }
+
+ // We call shifted_branch_offset inside each branch instruction's argument
+ // list so that it is computed just before the branch is generated, as
+ // needed.
+
+ switch (cond) {
+ case cc_always:
+ b(shifted_branch_offset(L, false));
+ break;
+ case eq:
+ beq(rs, r2, shifted_branch_offset(L, false));
+ break;
+ case ne:
+ bne(rs, r2, shifted_branch_offset(L, false));
+ break;
+
+ // Signed comparison
+ case greater:
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, shifted_branch_offset(L, false));
+ break;
+ case greater_equal:
+ slt(scratch, rs, r2);
+ beq(scratch, zero_reg, shifted_branch_offset(L, false));
+ break;
+ case less:
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, shifted_branch_offset(L, false));
+ break;
+ case less_equal:
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, shifted_branch_offset(L, false));
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, shifted_branch_offset(L, false));
+ break;
+ case Ugreater_equal:
+ sltu(scratch, rs, r2);
+ beq(scratch, zero_reg, shifted_branch_offset(L, false));
+ break;
+ case Uless:
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, shifted_branch_offset(L, false));
+ break;
+ case Uless_equal:
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, shifted_branch_offset(L, false));
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+// Trashes the at register if no scratch register is provided.
+// We need to use a bgezal or bltzal, but they can't be used directly with the
+// slt instructions. We could use sub or add instead but we would miss overflow
+// cases, so we keep slt and add an intermediate third instruction: slt leaves
+// 0 or 1 in scratch, addiu turns that into -1 or 0, and bgezal/bltzal then
+// test the sign.
+void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
+ const Operand& rt, Register scratch) {
+ Register r2;
+ if (rt.is_reg()) {
+ r2 = rt.rm_;
+ } else if (cond != cc_always) {
+ r2 = scratch;
+ li(r2, rt);
+ }
+
+ switch (cond) {
+ case cc_always:
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, r2, 2);
+ nop();
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, r2, 2);
+ nop();
+ bal(offset);
+ break;
+
+ // Signed comparison
+ case greater:
+ slt(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ bgezal(scratch, offset);
+ break;
+ case greater_equal:
+ slt(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ bltzal(scratch, offset);
+ break;
+ case less:
+ slt(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ bgezal(scratch, offset);
+ break;
+ case less_equal:
+ slt(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ bltzal(scratch, offset);
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ sltu(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ bgezal(scratch, offset);
+ break;
+ case Ugreater_equal:
+ sltu(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ bltzal(scratch, offset);
+ break;
+ case Uless:
+ sltu(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ bgezal(scratch, offset);
+ break;
+ case Uless_equal:
+ sltu(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ bltzal(scratch, offset);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
+ const Operand& rt, Register scratch) {
+ Register r2;
+ if (rt.is_reg()) {
+ r2 = rt.rm_;
+ } else if (cond != cc_always) {
+ r2 = scratch;
+ li(r2, rt);
+ }
+
+ switch (cond) {
+ case cc_always:
+ bal(shifted_branch_offset(L, false));
+ break;
+ case eq:
+ bne(rs, r2, 2);
+ nop();
+ bal(shifted_branch_offset(L, false));
+ break;
+ case ne:
+ beq(rs, r2, 2);
+ nop();
+ bal(shifted_branch_offset(L, false));
+ break;
+
+ // Signed comparison
+ case greater:
+ slt(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ bgezal(scratch, shifted_branch_offset(L, false));
+ break;
+ case greater_equal:
+ slt(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ bltzal(scratch, shifted_branch_offset(L, false));
+ break;
+ case less:
+ slt(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ bgezal(scratch, shifted_branch_offset(L, false));
+ break;
+ case less_equal:
+ slt(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ bltzal(scratch, shifted_branch_offset(L, false));
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ sltu(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ bgezal(scratch, shifted_branch_offset(L, false));
+ break;
+ case Ugreater_equal:
+ sltu(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ bltzal(scratch, shifted_branch_offset(L, false));
+ break;
+ case Uless:
+ sltu(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ bgezal(scratch, shifted_branch_offset(L, false));
+ break;
+ case Uless_equal:
+ sltu(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ bltzal(scratch, shifted_branch_offset(L, false));
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::Jump(const Operand& target,
+ Condition cond, Register rs, const Operand& rt) {
+ if (target.is_reg()) {
+ if (cond == cc_always) {
+ jr(target.rm());
+ } else {
+ Branch(NegateCondition(cond), 2, rs, rt);
+ nop();
+ jr(target.rm());
+ }
+ } else { // !target.is_reg()
+ if (!MustUseAt(target.rmode_)) {
+ if (cond == cc_always) {
+ j(target.imm32_);
+ } else {
+ Branch(NegateCondition(cond), 2, rs, rt);
+ nop();
+ j(target.imm32_); // will generate only one instruction.
+ }
+ } else { // MustUseAt(target)
+ li(at, target); // Load the jump target, not the comparison operand.
+ if (cond == cc_always) {
+ jr(at);
+ } else {
+ Branch(NegateCondition(cond), 2, rs, rt);
+ nop();
+ jr(at); // will generate only one instruction.
+ }
+ }
+ }
+}
+
+
+void MacroAssembler::Call(const Operand& target,
+ Condition cond, Register rs, const Operand& rt) {
+ if (target.is_reg()) {
+ if (cond == cc_always) {
+ jalr(target.rm());
+ } else {
+ Branch(NegateCondition(cond), 2, rs, rt);
+ nop();
+ jalr(target.rm());
+ }
+ } else { // !target.is_reg()
+ if (!MustUseAt(target.rmode_)) {
+ if (cond == cc_always) {
+ jal(target.imm32_);
+ } else {
+ Branch(NegateCondition(cond), 2, rs, rt);
+ nop();
+ jal(target.imm32_); // will generate only one instruction.
+ }
+ } else { // MustUseAt(target)
+ li(at, target); // Load the call target, not the comparison operand.
+ if (cond == cc_always) {
+ jalr(at);
+ } else {
+ Branch(NegateCondition(cond), 2, rs, rt);
+ nop();
+ jalr(at); // will generate only one instruction.
+ }
+ }
+ }
+}
+
+
+void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::Drop(int count, Condition cond) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::Call(Label* target) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// ---------------------------------------------------------------------------
+// Exception handling
+
+void MacroAssembler::PushTryHandler(CodeLocation try_location,
+ HandlerType type) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::PopTryHandler() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// ---------------------------------------------------------------------------
+// Activation frames
+
+void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
+ Register r1, const Operand& r2) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::StubReturn(int argc) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
+ int num_arguments,
+ int result_size) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
+ bool* resolved) {
+ UNIMPLEMENTED_MIPS();
+ return Handle<Code>(reinterpret_cast<Code*>(NULL)); // UNIMPLEMENTED RETURN
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeJSFlags flags) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::Assert(Condition cc, const char* msg,
+ Register rs, Operand rt) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::Check(Condition cc, const char* msg,
+ Register rs, Operand rt) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::Abort(const char* msg) {
+ UNIMPLEMENTED_MIPS();
+}
+
+} } // namespace v8::internal
+
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
new file mode 100644
index 000000000..aea98366e
--- /dev/null
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -0,0 +1,381 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
+#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
+
+#include "assembler.h"
+#include "mips/assembler-mips.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declaration.
+class JumpTarget;
+
+// Register at is used by the assembler when synthesizing instruction
+// sequences, so it is not safe to use unless we know exactly what we are
+// doing.
+
+// Register aliases.
+const Register cp = s7; // JavaScript context pointer.
+const Register fp = s8_fp; // Alias for the frame pointer register.
+
+enum InvokeJSFlags {
+ CALL_JS,
+ JUMP_JS
+};
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler: public Assembler {
+ public:
+ MacroAssembler(void* buffer, int size);
+
+ // Jump, Call, and Ret pseudo instructions.
+ void Jump(const Operand& target,
+ Condition cond = cc_always,
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ void Call(const Operand& target,
+ Condition cond = cc_always,
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ void Jump(Register target,
+ Condition cond = cc_always,
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ void Jump(byte* target, RelocInfo::Mode rmode,
+ Condition cond = cc_always,
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond = cc_always,
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ void Call(Register target,
+ Condition cond = cc_always,
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ void Call(byte* target, RelocInfo::Mode rmode,
+ Condition cond = cc_always,
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ void Call(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond = cc_always,
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ void Ret(Condition cond = cc_always,
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ void Branch(Condition cond, int16_t offset, Register rs = zero_reg,
+ const Operand& rt = Operand(zero_reg), Register scratch = at);
+ void Branch(Condition cond, Label* L, Register rs = zero_reg,
+ const Operand& rt = Operand(zero_reg), Register scratch = at);
+  // Conditional branch and link.
+ void BranchAndLink(Condition cond, int16_t offset, Register rs = zero_reg,
+ const Operand& rt = Operand(zero_reg),
+ Register scratch = at);
+ void BranchAndLink(Condition cond, Label* L, Register rs = zero_reg,
+ const Operand& rt = Operand(zero_reg),
+ Register scratch = at);
+
+ // Emit code to discard a non-negative number of pointer-sized elements
+ // from the stack, clobbering only the sp register.
+ void Drop(int count, Condition cond = cc_always);
+
+ void Call(Label* target);
+
+  // Jump unconditionally to the given label.
+  // We NEED a nop in the branch delay slot, as it is used by v8, for example
+  // in CodeGenerator::ProcessDeferred().
+  // Prefer b(Label) when generating code.
+ void jmp(Label* L) {
+ Branch(cc_always, L);
+ nop();
+ }
+
+ // Load an object from the root table.
+ void LoadRoot(Register destination,
+ Heap::RootListIndex index);
+ void LoadRoot(Register destination,
+ Heap::RootListIndex index,
+ Condition cond, Register src1, const Operand& src2);
+
+ // Sets the remembered set bit for [address+offset], where address is the
+ // address of the heap object 'object'. The address must be in the first 8K
+ // of an allocated page. The 'scratch' register is used in the
+ // implementation and all 3 registers are clobbered by the operation, as
+  // well as the at register.
+ void RecordWrite(Register object, Register offset, Register scratch);
+
+
+ // ---------------------------------------------------------------------------
+ // Instruction macros
+
+#define DEFINE_INSTRUCTION(instr) \
+ void instr(Register rd, Register rs, const Operand& rt); \
+ void instr(Register rd, Register rs, Register rt) { \
+ instr(rd, rs, Operand(rt)); \
+ } \
+ void instr(Register rs, Register rt, int32_t j) { \
+ instr(rs, rt, Operand(j)); \
+ }
+
+#define DEFINE_INSTRUCTION2(instr) \
+ void instr(Register rs, const Operand& rt); \
+ void instr(Register rs, Register rt) { \
+ instr(rs, Operand(rt)); \
+ } \
+ void instr(Register rs, int32_t j) { \
+ instr(rs, Operand(j)); \
+ }
+
+ DEFINE_INSTRUCTION(Add);
+ DEFINE_INSTRUCTION(Addu);
+ DEFINE_INSTRUCTION(Mul);
+ DEFINE_INSTRUCTION2(Mult);
+ DEFINE_INSTRUCTION2(Multu);
+ DEFINE_INSTRUCTION2(Div);
+ DEFINE_INSTRUCTION2(Divu);
+
+ DEFINE_INSTRUCTION(And);
+ DEFINE_INSTRUCTION(Or);
+ DEFINE_INSTRUCTION(Xor);
+ DEFINE_INSTRUCTION(Nor);
+
+ DEFINE_INSTRUCTION(Slt);
+ DEFINE_INSTRUCTION(Sltu);
+
+#undef DEFINE_INSTRUCTION
+#undef DEFINE_INSTRUCTION2
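+
+  // Illustrative expansion: DEFINE_INSTRUCTION(Addu) above declared the
+  // three overloads
+  //   void Addu(Register rd, Register rs, const Operand& rt);
+  //   void Addu(Register rd, Register rs, Register rt);
+  //   void Addu(Register rs, Register rt, int32_t j);
+  // so a caller may pass a register or an immediate and let the
+  // implementation pick the encoding (e.g. addu vs. addiu).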
+
+
+ //------------Pseudo-instructions-------------
+
+ void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
+ // Move the logical ones complement of source to dest.
+ void movn(Register rd, Register rt);
+
+
+  // Load an int32 into the rd register.
+ void li(Register rd, Operand j, bool gen2instr = false);
+ inline void li(Register rd, int32_t j, bool gen2instr = false) {
+ li(rd, Operand(j), gen2instr);
+ }
+
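+  // Illustrative: li(t0, 0x12345678) materializes the constant, classically
+  // as a lui/ori pair; small constants may fit a single instruction unless
+  // gen2instr forces the two-instruction form (presumably so that patched
+  // code keeps a fixed size).
+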
+ // Exception-generating instructions and debugging support
+ void stop(const char* msg);
+
+
+ // Push multiple registers on the stack.
+ // With MultiPush, lower registers are pushed first on the stack.
+ // For example if you push t0, t1, s0, and ra you get:
+ // | |
+ // |-----------------------|
+ // | t0 | +
+ // |-----------------------| |
+ // | t1 | |
+ // |-----------------------| |
+ // | s0 | v
+ // |-----------------------| -
+ // | ra |
+ // |-----------------------|
+ // | |
+ void MultiPush(RegList regs);
+ void MultiPushReversed(RegList regs);
+ void Push(Register src) {
+ Addu(sp, sp, Operand(-kPointerSize));
+ sw(src, MemOperand(sp, 0));
+ }
+ inline void push(Register src) { Push(src); }
+
+ void Push(Register src, Condition cond, Register tst1, Register tst2) {
+    // Since we don't have conditional execution, we use a Branch.
+ Branch(cond, 3, tst1, Operand(tst2));
+ nop();
+ Addu(sp, sp, Operand(-kPointerSize));
+ sw(src, MemOperand(sp, 0));
+ }
+
+  // Pops multiple values from the stack and loads them into the
+  // registers specified in regs. The pop order is the opposite of MultiPush.
+ void MultiPop(RegList regs);
+ void MultiPopReversed(RegList regs);
+ void Pop(Register dst) {
+ lw(dst, MemOperand(sp, 0));
+ Addu(sp, sp, Operand(kPointerSize));
+ }
+ void Pop() {
+    Addu(sp, sp, Operand(kPointerSize));  // Addu: sp arithmetic must not trap.
+ }
+
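+  // Illustrative usage, assuming the usual RegList encoding in which
+  // Register::bit() is (1 << code):
+  //   MultiPush(t0.bit() | t1.bit() | s0.bit() | ra.bit());
+  //   ...
+  //   MultiPop(t0.bit() | t1.bit() | s0.bit() | ra.bit());
+  // MultiPop restores in the reverse of the push order, so the pairing
+  // above round-trips all four values.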
+
+ // ---------------------------------------------------------------------------
+ // Exception handling
+
+ // Push a new try handler and link into try handler chain.
+  // The return address must be passed in register ra.
+  // On exit, v0 contains TOS (code slot).
+ void PushTryHandler(CodeLocation try_location, HandlerType type);
+
+ // Unlink the stack handler on top of the stack from the try handler chain.
+ // Must preserve the result register.
+ void PopTryHandler();
+
+
+ // ---------------------------------------------------------------------------
+ // Support functions.
+
+ inline void BranchOnSmi(Register value, Label* smi_label,
+ Register scratch = at) {
+ ASSERT_EQ(0, kSmiTag);
+ andi(scratch, value, kSmiTagMask);
+ Branch(eq, smi_label, scratch, Operand(zero_reg));
+ }
+
+
+ inline void BranchOnNotSmi(Register value, Label* not_smi_label,
+ Register scratch = at) {
+ ASSERT_EQ(0, kSmiTag);
+ andi(scratch, value, kSmiTagMask);
+ Branch(ne, not_smi_label, scratch, Operand(zero_reg));
+ }
+
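+  // Background: with kSmiTag == 0 and a one-bit tag, a small integer n is
+  // stored as n << kSmiTagSize (e.g. 5 becomes 0b1010), so and-ing with
+  // kSmiTagMask yields 0 exactly for smis, while heap object pointers carry
+  // a nonzero tag bit and fail the test.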
+
+ // ---------------------------------------------------------------------------
+ // Runtime calls
+
+ // Call a code stub.
+ void CallStub(CodeStub* stub, Condition cond = cc_always,
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ void CallJSExitStub(CodeStub* stub);
+
+ // Return from a code stub after popping its arguments.
+ void StubReturn(int argc);
+
+ // Call a runtime routine.
+ // Eventually this should be used for all C calls.
+ void CallRuntime(Runtime::Function* f, int num_arguments);
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments);
+
+ // Tail call of a runtime routine (jump).
+ // Like JumpToRuntime, but also takes care of passing the number
+ // of parameters.
+ void TailCallRuntime(const ExternalReference& ext,
+ int num_arguments,
+ int result_size);
+
+ // Jump to the builtin routine.
+ void JumpToRuntime(const ExternalReference& builtin);
+
+ // Invoke specified builtin JavaScript function. Adds an entry to
+ // the unresolved list if the name does not resolve.
+ void InvokeBuiltin(Builtins::JavaScript id, InvokeJSFlags flags);
+
+  // Store the code object for the given builtin in the target register and
+  // set up the function in a1.
+ void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+ struct Unresolved {
+ int pc;
+ uint32_t flags; // see Bootstrapper::FixupFlags decoders/encoders.
+ const char* name;
+ };
+ List<Unresolved>* unresolved() { return &unresolved_; }
+
+ Handle<Object> CodeObject() { return code_object_; }
+
+
+ // ---------------------------------------------------------------------------
+ // Stack limit support
+
+ void StackLimitCheck(Label* on_stack_limit_hit);
+
+
+ // ---------------------------------------------------------------------------
+ // StatsCounter support
+
+ void SetCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2);
+ void IncrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2);
+
+
+ // ---------------------------------------------------------------------------
+ // Debugging
+
+ // Calls Abort(msg) if the condition cc is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cc, const char* msg, Register rs, Operand rt);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cc, const char* msg, Register rs, Operand rt);
+
+ // Print a message to stdout and abort execution.
+ void Abort(const char* msg);
+
+ // Verify restrictions about code generated in stubs.
+ void set_generating_stub(bool value) { generating_stub_ = value; }
+ bool generating_stub() { return generating_stub_; }
+ void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
+ bool allow_stub_calls() { return allow_stub_calls_; }
+
+ private:
+ void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+
+  // Get the code for the given builtin. Sets the 'resolved' flag to
+  // whether the function could be resolved.
+ Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+
+ List<Unresolved> unresolved_;
+ bool generating_stub_;
+ bool allow_stub_calls_;
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+// Generate a MemOperand for loading a field from an object.
+static inline MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
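+
+// Illustrative: heap object pointers are tagged with kHeapObjectTag, so a
+// field load such as
+//   __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
+// subtracts the tag and addresses the raw field.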
+
+
+#ifdef GENERATED_CODE_COVERAGE
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
+
diff --git a/deps/v8/src/mips/register-allocator-mips-inl.h b/deps/v8/src/mips/register-allocator-mips-inl.h
new file mode 100644
index 000000000..a876bee49
--- /dev/null
+++ b/deps/v8/src/mips/register-allocator-mips-inl.h
@@ -0,0 +1,137 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_REGISTER_ALLOCATOR_MIPS_INL_H_
+#define V8_MIPS_REGISTER_ALLOCATOR_MIPS_INL_H_
+
+#include "v8.h"
+#include "mips/assembler-mips.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+bool RegisterAllocator::IsReserved(Register reg) {
+ // The code for this test relies on the order of register codes.
+ return reg.is(cp) || reg.is(s8_fp) || reg.is(sp);
+}
+
+
+int RegisterAllocator::ToNumber(Register reg) {
+ ASSERT(reg.is_valid() && !IsReserved(reg));
+ const int kNumbers[] = {
+ 0, // zero_reg
+ 1, // at
+ 2, // v0
+ 3, // v1
+ 4, // a0
+ 5, // a1
+ 6, // a2
+ 7, // a3
+ 8, // t0
+ 9, // t1
+ 10, // t2
+ 11, // t3
+ 12, // t4
+ 13, // t5
+    14,  // t6
+    15,  // t7
+    16,  // s0
+    17,  // s1
+    18,  // s2
+    19,  // s3
+    20,  // s4
+    21,  // s5
+    22,  // s6
+    23,  // s7
+    24,  // t8
+    25,  // t9
+ 26, // k0
+ 27, // k1
+ 28, // gp
+ 29, // sp
+ 30, // s8_fp
+ 31, // ra
+ };
+ return kNumbers[reg.code()];
+}
+
+
+Register RegisterAllocator::ToRegister(int num) {
+ ASSERT(num >= 0 && num < kNumRegisters);
+ const Register kRegisters[] = {
+ zero_reg,
+ at,
+ v0,
+ v1,
+ a0,
+ a1,
+ a2,
+ a3,
+ t0,
+ t1,
+ t2,
+ t3,
+ t4,
+ t5,
+ t6,
+ t7,
+ s0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ t8,
+ t9,
+ k0,
+ k1,
+ gp,
+ sp,
+ s8_fp,
+ ra
+ };
+ return kRegisters[num];
+}
+
+
+void RegisterAllocator::Initialize() {
+ Reset();
+ // The non-reserved a1 and ra registers are live on JS function entry.
+ Use(a1); // JS function.
+ Use(ra); // Return address.
+}
+
+
+} } // namespace v8::internal
+
+#endif  // V8_MIPS_REGISTER_ALLOCATOR_MIPS_INL_H_
+
diff --git a/deps/v8/src/mips/register-allocator-mips.cc b/deps/v8/src/mips/register-allocator-mips.cc
new file mode 100644
index 000000000..f48d3a655
--- /dev/null
+++ b/deps/v8/src/mips/register-allocator-mips.cc
@@ -0,0 +1,60 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Result implementation.
+
+void Result::ToRegister() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Result::ToRegister(Register target) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
+ // No byte registers on MIPS.
+ UNREACHABLE();
+ return Result();
+}
+
+
+} } // namespace v8::internal
+
diff --git a/deps/v8/src/mips/register-allocator-mips.h b/deps/v8/src/mips/register-allocator-mips.h
new file mode 100644
index 000000000..e056fb807
--- /dev/null
+++ b/deps/v8/src/mips/register-allocator-mips.h
@@ -0,0 +1,46 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
+#define V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
+
+#include "mips/constants-mips.h"
+
+namespace v8 {
+namespace internal {
+
+class RegisterAllocatorConstants : public AllStatic {
+ public:
+ static const int kNumRegisters = assembler::mips::kNumRegisters;
+ static const int kInvalidRegister = assembler::mips::kInvalidRegister;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
+
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
new file mode 100644
index 000000000..2e2dc865f
--- /dev/null
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -0,0 +1,1648 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+#include <cstdarg>
+#include "v8.h"
+
+#include "disasm.h"
+#include "assembler.h"
+#include "globals.h" // Need the bit_cast
+#include "mips/constants-mips.h"
+#include "mips/simulator-mips.h"
+
+namespace v8i = v8::internal;
+
+#if !defined(__mips)
+
+// Only build the simulator if not compiling for real MIPS hardware.
+namespace assembler {
+namespace mips {
+
+using ::v8::internal::Object;
+using ::v8::internal::PrintF;
+using ::v8::internal::OS;
+using ::v8::internal::ReadLine;
+using ::v8::internal::DeleteArray;
+
+// Utility functions.
+bool HaveSameSign(int32_t a, int32_t b) {
+  // Same sign exactly when the xor is non-negative; equal values xor to 0,
+  // so the test must accept zero.
+  return ((a ^ b) >= 0);
+}
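+// For example, 5 ^ 7 == 2 (same sign) and -1 ^ 1 == -2 (different signs),
+// while 5 ^ 5 == 0, which the >= comparison correctly treats as same-signed.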
+
+
+// This macro provides a platform independent use of sscanf. The reason we
+// do not implement SScanF in a platform independent way through
+// ::v8::internal::OS, as we do with SNPrintF, is that the Windows C Run-Time
+// Library does not provide vsscanf.
+#define SScanF sscanf // NOLINT
+
+// The Debugger class is used by the simulator while debugging simulated MIPS
+// code.
+class Debugger {
+ public:
+ explicit Debugger(Simulator* sim);
+ ~Debugger();
+
+ void Stop(Instruction* instr);
+ void Debug();
+
+ private:
+ // We set the breakpoint code to 0xfffff to easily recognize it.
+ static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xfffff << 6;
+ static const Instr kNopInstr = 0x0;
+
+ Simulator* sim_;
+
+ int32_t GetRegisterValue(int regnum);
+ bool GetValue(const char* desc, int32_t* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool SetBreakpoint(Instruction* breakpc);
+ bool DeleteBreakpoint(Instruction* breakpc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void UndoBreakpoints();
+ void RedoBreakpoints();
+
+ // Print all registers with a nice formatting.
+ void PrintAllRegs();
+};
+
+Debugger::Debugger(Simulator* sim) {
+ sim_ = sim;
+}
+
+Debugger::~Debugger() {
+}
+
+#ifdef GENERATED_CODE_COVERAGE
+static FILE* coverage_log = NULL;
+
+
+static void InitializeCoverage() {
+ char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
+ if (file_name != NULL) {
+ coverage_log = fopen(file_name, "aw+");
+ }
+}
+
+
+void Debugger::Stop(Instruction* instr) {
+ UNIMPLEMENTED_MIPS();
+ char* str = reinterpret_cast<char*>(instr->InstructionBits());
+ if (strlen(str) > 0) {
+ if (coverage_log != NULL) {
+ fprintf(coverage_log, "%s\n", str);
+ fflush(coverage_log);
+ }
+ instr->SetInstructionBits(0x0); // Overwrite with nop.
+ }
+ sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+}
+
+#else // ndef GENERATED_CODE_COVERAGE
+
+#define UNSUPPORTED() printf("Unsupported instruction.\n");
+
+static void InitializeCoverage() {}
+
+
+void Debugger::Stop(Instruction* instr) {
+ const char* str = reinterpret_cast<char*>(instr->InstructionBits());
+ PrintF("Simulator hit %s\n", str);
+ sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+ Debug();
+}
+#endif // def GENERATED_CODE_COVERAGE
+
+
+int32_t Debugger::GetRegisterValue(int regnum) {
+ if (regnum == kNumSimuRegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_register(regnum);
+ }
+}
+
+
+bool Debugger::GetValue(const char* desc, int32_t* value) {
+ int regnum = Registers::Number(desc);
+ if (regnum != kInvalidRegister) {
+ *value = GetRegisterValue(regnum);
+ return true;
+ } else {
+ return SScanF(desc, "%i", value) == 1;
+ }
+ return false;
+}
+
+
+bool Debugger::SetBreakpoint(Instruction* breakpc) {
+ // Check if a breakpoint can be set. If not return without any side-effects.
+ if (sim_->break_pc_ != NULL) {
+ return false;
+ }
+
+ // Set the breakpoint.
+ sim_->break_pc_ = breakpc;
+ sim_->break_instr_ = breakpc->InstructionBits();
+ // Not setting the breakpoint instruction in the code itself. It will be set
+ // when the debugger shell continues.
+ return true;
+}
+
+
+bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
+ if (sim_->break_pc_ != NULL) {
+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ }
+
+ sim_->break_pc_ = NULL;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+
+void Debugger::UndoBreakpoints() {
+ if (sim_->break_pc_ != NULL) {
+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ }
+}
+
+
+void Debugger::RedoBreakpoints() {
+ if (sim_->break_pc_ != NULL) {
+ sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
+ }
+}
+
+void Debugger::PrintAllRegs() {
+#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
+
+ PrintF("\n");
+ // at, v0, a0
+ PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+ REG_INFO(1), REG_INFO(2), REG_INFO(4));
+ // v1, a1
+ PrintF("%26s\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+ "", REG_INFO(3), REG_INFO(5));
+ // a2
+ PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(6));
+ // a3
+ PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(7));
+ PrintF("\n");
+ // t0-t7, s0-s7
+ for (int i = 0; i < 8; i++) {
+ PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+ REG_INFO(8+i), REG_INFO(16+i));
+ }
+ PrintF("\n");
+ // t8, k0, LO
+ PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+ REG_INFO(24), REG_INFO(26), REG_INFO(32));
+ // t9, k1, HI
+ PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+ REG_INFO(25), REG_INFO(27), REG_INFO(33));
+ // sp, fp, gp
+ PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+ REG_INFO(29), REG_INFO(30), REG_INFO(28));
+ // pc
+ PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+ REG_INFO(31), REG_INFO(34));
+#undef REG_INFO
+}
+
+void Debugger::Debug() {
+ intptr_t last_pc = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+
+  // Make sure to have a proper terminating character if we reach the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ UndoBreakpoints();
+
+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+ if (last_pc != sim_->get_pc()) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // use a reasonably large buffer
+ v8::internal::EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer,
+ reinterpret_cast<byte_*>(sim_->get_pc()));
+ PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.start());
+ last_pc = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == NULL) {
+ break;
+ } else {
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int args = SScanF(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ if (!(reinterpret_cast<Instruction*>(sim_->get_pc())->IsTrap())) {
+ sim_->InstructionDecode(
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
+ } else {
+ // Allow si to jump over generated breakpoints.
+ PrintF("/!\\ Jumping over generated breakpoint.\n");
+ sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+ }
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (args == 2) {
+ int32_t value;
+ if (strcmp(arg1, "all") == 0) {
+ PrintAllRegs();
+ } else {
+ if (GetValue(arg1, &value)) {
+ PrintF("%s: 0x%08x %d \n", arg1, value, value);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ PrintF("print <register>\n");
+ }
+ } else if ((strcmp(cmd, "po") == 0)
+ || (strcmp(cmd, "printobject") == 0)) {
+ if (args == 2) {
+ int32_t value;
+ if (GetValue(arg1, &value)) {
+ Object* obj = reinterpret_cast<Object*>(value);
+ PrintF("%s: \n", arg1);
+#ifdef DEBUG
+ obj->PrintLn();
+#else
+ obj->ShortPrint();
+ PrintF("\n");
+#endif
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("printobject <value>\n");
+ }
+ } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0)) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // use a reasonably large buffer
+ v8::internal::EmbeddedVector<char, 256> buffer;
+
+ byte_* cur = NULL;
+ byte_* end = NULL;
+
+ if (args == 1) {
+ cur = reinterpret_cast<byte_*>(sim_->get_pc());
+ end = cur + (10 * Instruction::kInstructionSize);
+ } else if (args == 2) {
+ int32_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte_*>(value);
+ // no length parameter passed, assume 10 instructions
+ end = cur + (10 * Instruction::kInstructionSize);
+ }
+ } else {
+ int32_t value1;
+ int32_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte_*>(value1);
+ end = cur + (value2 * Instruction::kInstructionSize);
+ }
+ }
+
+ while (cur < end) {
+ dasm.InstructionDecode(buffer, cur);
+ PrintF(" 0x%08x %s\n", cur, buffer.start());
+ cur += Instruction::kInstructionSize;
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ PrintF("relinquishing control to gdb\n");
+ v8::internal::OS::DebugBreak();
+ PrintF("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (args == 2) {
+ int32_t value;
+ if (GetValue(arg1, &value)) {
+ if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
+ PrintF("setting breakpoint failed\n");
+ }
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!DeleteBreakpoint(NULL)) {
+ PrintF("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+        PrintF("No flags on MIPS!\n");
+      } else if (strcmp(cmd, "unstop") == 0) {
+        PrintF("Unstop command not implemented on MIPS.\n");
+ } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
+ // Print registers and disassemble
+ PrintAllRegs();
+ PrintF("\n");
+
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // use a reasonably large buffer
+ v8::internal::EmbeddedVector<char, 256> buffer;
+
+ byte_* cur = NULL;
+ byte_* end = NULL;
+
+ if (args == 1) {
+ cur = reinterpret_cast<byte_*>(sim_->get_pc());
+ end = cur + (10 * Instruction::kInstructionSize);
+ } else if (args == 2) {
+ int32_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte_*>(value);
+ // no length parameter passed, assume 10 instructions
+ end = cur + (10 * Instruction::kInstructionSize);
+ }
+ } else {
+ int32_t value1;
+ int32_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte_*>(value1);
+ end = cur + (value2 * Instruction::kInstructionSize);
+ }
+ }
+
+ while (cur < end) {
+ dasm.InstructionDecode(buffer, cur);
+ PrintF(" 0x%08x %s\n", cur, buffer.start());
+ cur += Instruction::kInstructionSize;
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ PrintF("cont\n");
+ PrintF(" continue execution (alias 'c')\n");
+ PrintF("stepi\n");
+ PrintF(" step one instruction (alias 'si')\n");
+ PrintF("print <register>\n");
+ PrintF(" print register content (alias 'p')\n");
+ PrintF(" use register name 'all' to print all registers\n");
+ PrintF("printobject <register>\n");
+ PrintF(" print an object from a register (alias 'po')\n");
+ PrintF("flags\n");
+ PrintF(" print flags\n");
+ PrintF("disasm [<instructions>]\n");
+ PrintF("disasm [[<address>] <instructions>]\n");
+ PrintF(" disassemble code, default is 10 instructions from pc\n");
+ PrintF("gdb\n");
+ PrintF(" enter gdb\n");
+ PrintF("break <address>\n");
+ PrintF(" set a break point on the address\n");
+ PrintF("del\n");
+ PrintF(" delete the breakpoint\n");
+ PrintF("unstop\n");
+ PrintF(" ignore the stop instruction at the current location");
+ PrintF(" from now on\n");
+ } else {
+ PrintF("Unknown command: %s\n", cmd);
+ }
+ }
+ DeleteArray(line);
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ RedoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+
+// Create one simulator per thread and keep it in thread local storage.
+static v8::internal::Thread::LocalStorageKey simulator_key;
+
+
+bool Simulator::initialized_ = false;
+
+
+void Simulator::Initialize() {
+ if (initialized_) return;
+ simulator_key = v8::internal::Thread::CreateThreadLocalKey();
+ initialized_ = true;
+ ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
+}
+
+
+Simulator::Simulator() {
+ Initialize();
+  // Set up the simulator support first. Some of this information is needed
+  // to set up the architecture state.
+  size_t stack_size = 1 * 1024 * 1024;  // Allocate 1MB for the stack.
+ stack_ = reinterpret_cast<char*>(malloc(stack_size));
+ pc_modified_ = false;
+ icount_ = 0;
+ break_pc_ = NULL;
+ break_instr_ = 0;
+
+ // Setup architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < kNumSimuRegisters; i++) {
+ registers_[i] = 0;
+ }
+
+ // The sp is initialized to point to the bottom (high address) of the
+  // allocated stack area. To be safe against potential stack underflows, we
+  // leave some buffer below.
+ registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size - 64;
+ // The ra and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_ra;
+ registers_[ra] = bad_ra;
+ InitializeCoverage();
+}
+
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator. We write the original destination of the jump just at a known
+// offset from the swi instruction so the simulator knows what to call.
+class Redirection {
+ public:
+ Redirection(void* external_function, bool fp_return)
+ : external_function_(external_function),
+ swi_instruction_(rtCallRedirInstr),
+ fp_return_(fp_return),
+ next_(list_) {
+ list_ = this;
+ }
+
+ void* address_of_swi_instruction() {
+ return reinterpret_cast<void*>(&swi_instruction_);
+ }
+
+ void* external_function() { return external_function_; }
+ bool fp_return() { return fp_return_; }
+
+ static Redirection* Get(void* external_function, bool fp_return) {
+ Redirection* current;
+ for (current = list_; current != NULL; current = current->next_) {
+ if (current->external_function_ == external_function) return current;
+ }
+ return new Redirection(external_function, fp_return);
+ }
+
+ static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
+ char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
+ char* addr_of_redirection =
+ addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
+ return reinterpret_cast<Redirection*>(addr_of_redirection);
+ }
+
+ private:
+ void* external_function_;
+ uint32_t swi_instruction_;
+ bool fp_return_;
+ Redirection* next_;
+ static Redirection* list_;
+};
+
+
+Redirection* Redirection::list_ = NULL;
+
+
+void* Simulator::RedirectExternalReference(void* external_function,
+ bool fp_return) {
+ Redirection* redirection = Redirection::Get(external_function, fp_return);
+ return redirection->address_of_swi_instruction();
+}
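+
+// Illustrative flow, assuming a host function at address F: generated code
+// receives address_of_swi_instruction() instead of F. When the simulator
+// decodes that rtCallRedirInstr, FromSwiInstruction() subtracts
+// OFFSET_OF(Redirection, swi_instruction_) from the instruction's address
+// to recover the Redirection object, and SoftwareInterrupt() below then
+// calls F natively on the host.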
+
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current() {
+ Initialize();
+ Simulator* sim = reinterpret_cast<Simulator*>(
+ v8::internal::Thread::GetThreadLocal(simulator_key));
+ if (sim == NULL) {
+ // TODO(146): delete the simulator object when a thread goes away.
+ sim = new Simulator();
+ v8::internal::Thread::SetThreadLocal(simulator_key, sim);
+ }
+ return sim;
+}
+
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::set_register(int reg, int32_t value) {
+ ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+
+  // The zero register always holds 0.
+ registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+void Simulator::set_fpu_register(int fpureg, int32_t value) {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ FPUregisters_[fpureg] = value;
+}
+
+void Simulator::set_fpu_register_double(int fpureg, double value) {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
+ *v8i::bit_cast<double*, int32_t*>(&FPUregisters_[fpureg]) = value;
+}
+
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int32_t Simulator::get_register(int reg) const {
+ ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
+ if (reg == 0)
+ return 0;
+ else
+ return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
+}
+
+int32_t Simulator::get_fpu_register(int fpureg) const {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return FPUregisters_[fpureg];
+}
+
+double Simulator::get_fpu_register_double(int fpureg) const {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
+ return *v8i::bit_cast<double*, int32_t*>(
+ const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
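+
+// Note: a double occupies an even/odd FPU register pair (hence the
+// fpureg % 2 == 0 asserts); the bit_cast reinterprets the two consecutive
+// int32 slots as one IEEE-754 double without any value conversion.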
+
+// Raw access to the PC register.
+void Simulator::set_pc(int32_t value) {
+ pc_modified_ = true;
+ registers_[pc] = value;
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int32_t Simulator::get_pc() const {
+ return registers_[pc];
+}
+
+
+// The MIPS cannot do unaligned reads and writes. On some MIPS platforms an
+// interrupt is caused. On others it does a funky rotation thing. For now we
+// simply disallow unaligned reads, but at some point we may want to move to
+// emulating the rotate behaviour. Note that simulator runs have the runtime
+// system running directly on the host system and only generated code is
+// executed in the simulator. Since the host is typically IA32 we will not
+// get the correct MIPS-like behaviour on unaligned accesses.
+
+int Simulator::ReadW(int32_t addr, Instruction* instr) {
+ if ((addr & v8i::kPointerAlignmentMask) == 0) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ return *ptr;
+ }
+ PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
+ OS::Abort();
+ return 0;
+}
+
+
+void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
+ if ((addr & v8i::kPointerAlignmentMask) == 0) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+ OS::Abort();
+}
+
+
+double Simulator::ReadD(int32_t addr, Instruction* instr) {
+ if ((addr & kDoubleAlignmentMask) == 0) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ return *ptr;
+ }
+ PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
+ OS::Abort();
+ return 0;
+}
+
+
+void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
+ if ((addr & kDoubleAlignmentMask) == 0) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ *ptr = value;
+ return;
+ }
+ PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+ OS::Abort();
+}
+
+
+uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
+ if ((addr & 1) == 0) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
+ }
+ PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
+ OS::Abort();
+ return 0;
+}
+
+
+int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
+ if ((addr & 1) == 0) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ return *ptr;
+ }
+ PrintF("Unaligned signed halfword read at 0x%08x, pc=%p\n", addr, instr);
+ OS::Abort();
+ return 0;
+}
+
+
+void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
+ if ((addr & 1) == 0) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
+ OS::Abort();
+}
+
+
+void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
+ if ((addr & 1) == 0) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr);
+ OS::Abort();
+}
+
+
+uint32_t Simulator::ReadBU(int32_t addr) {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ return *ptr & 0xff;
+}
+
+
+int32_t Simulator::ReadB(int32_t addr) {
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+  // The int8_t load already sign-extends on integer promotion; masking with
+  // 0xff would wrongly zero-extend, so return the promoted value directly.
+  return *ptr;
+}
+
+
+void Simulator::WriteB(int32_t addr, uint8_t value) {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+}
+
+
+void Simulator::WriteB(int32_t addr, int8_t value) {
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ *ptr = value;
+}
+
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit() const {
+ // Leave a safety margin of 256 bytes to prevent overrunning the stack when
+ // pushing values.
+ return reinterpret_cast<uintptr_t>(stack_) + 256;
+}
+
+
+// Unsupported instructions use Format to print an error and stop execution.
+void Simulator::Format(Instruction* instr, const char* format) {
+ PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
+ instr, format);
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls the code in runtime.cc
+// uses the ObjectPair which is essentially two 32-bit values stuffed into a
+// 64-bit value. With the code below we assume that all runtime calls return
+// 64 bits of result. If they don't, the v1 result register contains a bogus
+// value, which is fine because it is caller-saved.
+typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
+ int32_t arg1,
+ int32_t arg2,
+ int32_t arg3);
+typedef double (*SimulatorRuntimeFPCall)(double fparg0,
+ double fparg1);
+
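+// Illustrative: a runtime call returning the int64_t 0x0000000100000002 is
+// split below into lo_res == 2 (placed in v0) and hi_res == 1 (placed in
+// v1), matching the o32 convention for 64-bit integer results.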
+
+// Software interrupt instructions are used by the simulator to call into the
+// C-based V8 runtime.
+void Simulator::SoftwareInterrupt(Instruction* instr) {
+ // We first check if we met a call_rt_redirected.
+ if (instr->InstructionBits() == rtCallRedirInstr) {
+ Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ int32_t arg0 = get_register(a0);
+ int32_t arg1 = get_register(a1);
+ int32_t arg2 = get_register(a2);
+ int32_t arg3 = get_register(a3);
+    // FP arguments are passed in f12 and f14 (though not always; see the
+    // MIPS calling conventions for more details).
+ double fparg0 = get_fpu_register_double(f12);
+ double fparg1 = get_fpu_register_double(f14);
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ int32_t saved_ra = get_register(ra);
+ if (redirection->fp_return()) {
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->external_function());
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p with args %f, %f\n",
+ FUNCTION_ADDR(target), fparg0, fparg1);
+ }
+ double result = target(fparg0, fparg1);
+ set_fpu_register_double(f0, result);
+ } else {
+ intptr_t external =
+ reinterpret_cast<int32_t>(redirection->external_function());
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(
+ "Call to host function at %p with args %08x, %08x, %08x, %08x\n",
+ FUNCTION_ADDR(target),
+ arg0,
+ arg1,
+ arg2,
+ arg3);
+ }
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ int32_t lo_res = static_cast<int32_t>(result);
+ int32_t hi_res = static_cast<int32_t>(result >> 32);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08x\n", lo_res);
+ }
+ set_register(v0, lo_res);
+ set_register(v1, hi_res);
+ }
+ set_register(ra, saved_ra);
+ set_pc(get_register(ra));
+ } else {
+ Debugger dbg(this);
+ dbg.Debug();
+ }
+}
+
+void Simulator::SignalExceptions() {
+ for (int i = 1; i < kNumExceptions; i++) {
+ if (exceptions[i] != 0) {
+ V8_Fatal(__FILE__, __LINE__, "Error: Exception %i raised.", i);
+ }
+ }
+}
+
+// Handle execution based on instruction types.
+void Simulator::DecodeTypeRegister(Instruction* instr) {
+ // Instruction fields
+ Opcode op = instr->OpcodeFieldRaw();
+ int32_t rs_reg = instr->RsField();
+ int32_t rs = get_register(rs_reg);
+ uint32_t rs_u = static_cast<uint32_t>(rs);
+ int32_t rt_reg = instr->RtField();
+ int32_t rt = get_register(rt_reg);
+ uint32_t rt_u = static_cast<uint32_t>(rt);
+ int32_t rd_reg = instr->RdField();
+ uint32_t sa = instr->SaField();
+
+ int32_t fs_reg= instr->FsField();
+
+  // ALU output. It should not be used as-is; instructions that use it must
+  // always initialize it first.
+ int32_t alu_out = 0x12345678;
+ // Output or temporary for floating point.
+ double fp_out = 0.0;
+
+ // For break and trap instructions.
+ bool do_interrupt = false;
+
+ // For jr and jalr
+ // Get current pc.
+ int32_t current_pc = get_pc();
+ // Next pc
+ int32_t next_pc = 0;
+
+ // ---------- Configuration
+ switch (op) {
+ case COP1: // Coprocessor instructions
+ switch (instr->RsFieldRaw()) {
+ case BC1: // branch on coprocessor condition
+ UNREACHABLE();
+ break;
+ case MFC1:
+ alu_out = get_fpu_register(fs_reg);
+ break;
+ case MFHC1:
+ fp_out = get_fpu_register_double(fs_reg);
+ alu_out = *v8i::bit_cast<int32_t*, double*>(&fp_out);
+ break;
+ case MTC1:
+ case MTHC1:
+ // Do the store in the execution step.
+ break;
+ case S:
+ case D:
+ case W:
+ case L:
+ case PS:
+ // Do everything in the execution step.
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ };
+ break;
+ case SPECIAL:
+ switch (instr->FunctionFieldRaw()) {
+ case JR:
+ case JALR:
+ next_pc = get_register(instr->RsField());
+ break;
+ case SLL:
+ alu_out = rt << sa;
+ break;
+ case SRL:
+ alu_out = rt_u >> sa;
+ break;
+ case SRA:
+ alu_out = rt >> sa;
+ break;
+ case SLLV:
+ alu_out = rt << rs;
+ break;
+ case SRLV:
+ alu_out = rt_u >> rs;
+ break;
+ case SRAV:
+ alu_out = rt >> rs;
+ break;
+ case MFHI:
+ alu_out = get_register(HI);
+ break;
+ case MFLO:
+ alu_out = get_register(LO);
+ break;
+ case MULT:
+ UNIMPLEMENTED_MIPS();
+ break;
+ case MULTU:
+ UNIMPLEMENTED_MIPS();
+ break;
+ case DIV:
+ case DIVU:
+ exceptions[kDivideByZero] = rt == 0;
+ break;
+ case ADD:
+ if (HaveSameSign(rs, rt)) {
+ if (rs > 0) {
+ exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - rt);
+ } else if (rs < 0) {
+ exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue - rt);
+ }
+ }
+ alu_out = rs + rt;
+ break;
+ case ADDU:
+ alu_out = rs + rt;
+ break;
+ case SUB:
+ if (!HaveSameSign(rs, rt)) {
+ if (rs > 0) {
+ exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue + rt);
+ } else if (rs < 0) {
+ exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue + rt);
+ }
+ }
+ alu_out = rs - rt;
+ break;
+ case SUBU:
+ alu_out = rs - rt;
+ break;
+ case AND:
+ alu_out = rs & rt;
+ break;
+ case OR:
+ alu_out = rs | rt;
+ break;
+ case XOR:
+ alu_out = rs ^ rt;
+ break;
+ case NOR:
+ alu_out = ~(rs | rt);
+ break;
+ case SLT:
+ alu_out = rs < rt ? 1 : 0;
+ break;
+ case SLTU:
+ alu_out = rs_u < rt_u ? 1 : 0;
+ break;
+ // Break and trap instructions
+ case BREAK:
+ do_interrupt = true;
+ break;
+ case TGE:
+ do_interrupt = rs >= rt;
+ break;
+ case TGEU:
+ do_interrupt = rs_u >= rt_u;
+ break;
+ case TLT:
+ do_interrupt = rs < rt;
+ break;
+ case TLTU:
+ do_interrupt = rs_u < rt_u;
+ break;
+ case TEQ:
+ do_interrupt = rs == rt;
+ break;
+ case TNE:
+ do_interrupt = rs != rt;
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break;
+ case SPECIAL2:
+ switch (instr->FunctionFieldRaw()) {
+ case MUL:
+ alu_out = rs_u * rt_u; // Only the lower 32 bits are kept.
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ default:
+ UNREACHABLE();
+ };
+
+ // ---------- Raise exceptions triggered.
+ SignalExceptions();
+
+ // ---------- Execution
+ switch (op) {
+ case COP1:
+ switch (instr->RsFieldRaw()) {
+ case BC1: // branch on coprocessor condition
+ UNREACHABLE();
+ break;
+ case MFC1:
+ case MFHC1:
+ set_register(rt_reg, alu_out);
+ break;
+ case MTC1:
+ // We don't need to set the higher bits to 0, because MIPS ISA says
+ // they are in an unpredictable state after executing MTC1.
+ FPUregisters_[fs_reg] = registers_[rt_reg];
+ FPUregisters_[fs_reg+1] = Unpredictable;
+ break;
+ case MTHC1:
+ // Here we need to keep the lower bits unchanged.
+ FPUregisters_[fs_reg+1] = registers_[rt_reg];
+ break;
+ case S:
+ switch (instr->FunctionFieldRaw()) {
+ case CVT_D_S:
+ case CVT_W_S:
+ case CVT_L_S:
+ case CVT_PS_S:
+ UNIMPLEMENTED_MIPS();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case D:
+ switch (instr->FunctionFieldRaw()) {
+ case CVT_S_D:
+ case CVT_W_D:
+ case CVT_L_D:
+ UNIMPLEMENTED_MIPS();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case W:
+ switch (instr->FunctionFieldRaw()) {
+ case CVT_S_W:
+ UNIMPLEMENTED_MIPS();
+ break;
+ case CVT_D_W: // Convert word to double.
+ set_fpu_register(rd_reg, static_cast<double>(rs));
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break;
+ case L:
+ switch (instr->FunctionFieldRaw()) {
+ case CVT_S_L:
+ case CVT_D_L:
+ UNIMPLEMENTED_MIPS();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case PS:
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break;
+ case SPECIAL:
+ switch (instr->FunctionFieldRaw()) {
+ case JR: {
+ Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
+ current_pc+Instruction::kInstructionSize);
+ BranchDelayInstructionDecode(branch_delay_instr);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ case JALR: {
+ Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
+ current_pc+Instruction::kInstructionSize);
+ BranchDelayInstructionDecode(branch_delay_instr);
+        set_register(31, current_pc + 2 * Instruction::kInstructionSize);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ // Instructions using HI and LO registers.
+ case MULT:
+ case MULTU:
+ break;
+ case DIV:
+ // Divide by zero was checked in the configuration step.
+ set_register(LO, rs / rt);
+ set_register(HI, rs % rt);
+ break;
+ case DIVU:
+ set_register(LO, rs_u / rt_u);
+ set_register(HI, rs_u % rt_u);
+ break;
+ // Break and trap instructions
+ case BREAK:
+ case TGE:
+ case TGEU:
+ case TLT:
+ case TLTU:
+ case TEQ:
+ case TNE:
+ if (do_interrupt) {
+ SoftwareInterrupt(instr);
+ }
+ break;
+ default: // For other special opcodes we do the default operation.
+ set_register(rd_reg, alu_out);
+ };
+ break;
+ case SPECIAL2:
+ switch (instr->FunctionFieldRaw()) {
+ case MUL:
+ set_register(rd_reg, alu_out);
+ // HI and LO are UNPREDICTABLE after the operation.
+ set_register(LO, Unpredictable);
+ set_register(HI, Unpredictable);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ // Unimplemented opcodes raised an error in the configuration step before,
+ // so we can use the default here to set the destination register in common
+ // cases.
+ default:
+ set_register(rd_reg, alu_out);
+ };
+}
+
+// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
+void Simulator::DecodeTypeImmediate(Instruction* instr) {
+ // Instruction fields
+ Opcode op = instr->OpcodeFieldRaw();
+ int32_t rs = get_register(instr->RsField());
+ uint32_t rs_u = static_cast<uint32_t>(rs);
+ int32_t rt_reg = instr->RtField(); // destination register
+ int32_t rt = get_register(rt_reg);
+ int16_t imm16 = instr->Imm16Field();
+
+ int32_t ft_reg = instr->FtField(); // destination register
+ int32_t ft = get_register(ft_reg);
+
+  // Zero-extended immediate.
+  uint32_t oe_imm16 = 0xffff & imm16;
+  // Sign-extended immediate.
+  int32_t se_imm16 = imm16;
+
+ // Get current pc.
+ int32_t current_pc = get_pc();
+ // Next pc.
+ int32_t next_pc = bad_ra;
+
+ // Used for conditional branch instructions
+ bool do_branch = false;
+ bool execute_branch_delay_instruction = false;
+
+ // Used for arithmetic instructions
+ int32_t alu_out = 0;
+ // Floating point
+ double fp_out = 0.0;
+
+ // Used for memory instructions
+ int32_t addr = 0x0;
+
+ // ---------- Configuration (and execution for REGIMM)
+ switch (op) {
+ // ------------- COP1. Coprocessor instructions
+ case COP1:
+ switch (instr->RsFieldRaw()) {
+ case BC1: // branch on coprocessor condition
+ UNIMPLEMENTED_MIPS();
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break;
+ // ------------- REGIMM class
+ case REGIMM:
+ switch (instr->RtFieldRaw()) {
+ case BLTZ:
+ do_branch = (rs < 0);
+ break;
+ case BLTZAL:
+ do_branch = rs < 0;
+ break;
+ case BGEZ:
+ do_branch = rs >= 0;
+ break;
+ case BGEZAL:
+ do_branch = rs >= 0;
+ break;
+ default:
+ UNREACHABLE();
+ };
+ switch (instr->RtFieldRaw()) {
+ case BLTZ:
+ case BLTZAL:
+ case BGEZ:
+ case BGEZAL:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstructionSize;
+ if (instr->IsLinkingInstruction()) {
+ set_register(31, current_pc + kBranchReturnOffset);
+ }
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ default:
+ break;
+ };
+ break; // case REGIMM
+ // ------------- Branch instructions
+ // When comparing to zero, the encoding of rt field is always 0, so we don't
+ // need to replace rt with zero.
+ case BEQ:
+ do_branch = (rs == rt);
+ break;
+ case BNE:
+ do_branch = rs != rt;
+ break;
+ case BLEZ:
+ do_branch = rs <= 0;
+ break;
+ case BGTZ:
+ do_branch = rs > 0;
+ break;
+ // ------------- Arithmetic instructions
+ case ADDI:
+ if (HaveSameSign(rs, se_imm16)) {
+ if (rs > 0) {
+ exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - se_imm16);
+ } else if (rs < 0) {
+ exceptions[kIntegerUnderflow] =
+ rs < (Registers::kMinValue - se_imm16);
+ }
+ }
+ alu_out = rs + se_imm16;
+ break;
+ case ADDIU:
+ alu_out = rs + se_imm16;
+ break;
+ case SLTI:
+ alu_out = (rs < se_imm16) ? 1 : 0;
+ break;
+ case SLTIU:
+ alu_out = (rs_u < static_cast<uint32_t>(se_imm16)) ? 1 : 0;
+ break;
+ case ANDI:
+ alu_out = rs & oe_imm16;
+ break;
+ case ORI:
+ alu_out = rs | oe_imm16;
+ break;
+ case XORI:
+ alu_out = rs ^ oe_imm16;
+ break;
+ case LUI:
+ alu_out = (oe_imm16 << 16);
+ break;
+ // ------------- Memory instructions
+ case LB:
+ addr = rs + se_imm16;
+ alu_out = ReadB(addr);
+ break;
+ case LW:
+ addr = rs + se_imm16;
+ alu_out = ReadW(addr, instr);
+ break;
+ case LBU:
+ addr = rs + se_imm16;
+ alu_out = ReadBU(addr);
+ break;
+ case SB:
+ addr = rs + se_imm16;
+ break;
+ case SW:
+ addr = rs + se_imm16;
+ break;
+ case LWC1:
+ addr = rs + se_imm16;
+ alu_out = ReadW(addr, instr);
+ break;
+ case LDC1:
+ addr = rs + se_imm16;
+ fp_out = ReadD(addr, instr);
+ break;
+ case SWC1:
+ case SDC1:
+ addr = rs + se_imm16;
+ break;
+ default:
+ UNREACHABLE();
+ };
+
+ // ---------- Raise exceptions triggered.
+ SignalExceptions();
+
+ // ---------- Execution
+ switch (op) {
+ // ------------- Branch instructions
+ case BEQ:
+ case BNE:
+ case BLEZ:
+ case BGTZ:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstructionSize;
+ if (instr->IsLinkingInstruction()) {
+          set_register(31, current_pc + 2 * Instruction::kInstructionSize);
+ }
+ } else {
+ next_pc = current_pc + 2 * Instruction::kInstructionSize;
+ }
+ break;
+ // ------------- Arithmetic instructions
+ case ADDI:
+ case ADDIU:
+ case SLTI:
+ case SLTIU:
+ case ANDI:
+ case ORI:
+ case XORI:
+ case LUI:
+ set_register(rt_reg, alu_out);
+ break;
+ // ------------- Memory instructions
+ case LB:
+ case LW:
+ case LBU:
+ set_register(rt_reg, alu_out);
+ break;
+ case SB:
+ WriteB(addr, static_cast<int8_t>(rt));
+ break;
+ case SW:
+ WriteW(addr, rt, instr);
+ break;
+ case LWC1:
+ set_fpu_register(ft_reg, alu_out);
+ break;
+ case LDC1:
+ set_fpu_register_double(ft_reg, fp_out);
+ break;
+ case SWC1:
+ addr = rs + se_imm16;
+ WriteW(addr, get_fpu_register(ft_reg), instr);
+ break;
+ case SDC1:
+ addr = rs + se_imm16;
+ WriteD(addr, ft, instr);
+ break;
+ default:
+ break;
+ };
+
+
+ if (execute_branch_delay_instruction) {
+ // Execute branch delay slot
+ // We don't check for end_sim_pc. First, it should not be encountered, as the
+ // current pc is valid. Second, a branch should always execute its delay slot.
+ Instruction* branch_delay_instr =
+ reinterpret_cast<Instruction*>(current_pc + Instruction::kInstructionSize);
+ BranchDelayInstructionDecode(branch_delay_instr);
+ }
+
+ // If needed update pc after the branch delay execution.
+ if (next_pc != bad_ra) {
+ set_pc(next_pc);
+ }
+}
+
+// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
+void Simulator::DecodeTypeJump(Instruction* instr) {
+ // Get current pc.
+ int32_t current_pc = get_pc();
+ // Get unchanged bits of pc.
+ int32_t pc_high_bits = current_pc & 0xf0000000;
+ // Next pc
+ int32_t next_pc = pc_high_bits | (instr->Imm26Field() << 2);
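+ // A J-type target keeps the upper 4 bits of the current pc. For example
+ // (hypothetical values): current_pc == 0x40000008 and Imm26Field() == 0x100
+ // give next_pc == 0x40000000 | (0x100 << 2) == 0x40000400.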
+
+ // Execute branch delay slot
+ // We don't check for end_sim_pc. First, it should not be encountered, as the
+ // current pc is valid. Second, a jump should always execute its delay slot.
+ Instruction* branch_delay_instr =
+ reinterpret_cast<Instruction*>(current_pc + Instruction::kInstructionSize);
+ BranchDelayInstructionDecode(branch_delay_instr);
+
+ // Update pc and ra if necessary.
+ // Do this after the branch delay execution.
+ if (instr->IsLinkingInstruction()) {
+ set_register(31, current_pc + 2 * Instruction::kInstructionSize);
+ }
+ set_pc(next_pc);
+ pc_modified_ = true;
+}
+
+// Executes the current instruction.
+void Simulator::InstructionDecode(Instruction* instr) {
+ pc_modified_ = false;
+ if (::v8::internal::FLAG_trace_sim) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ v8::internal::EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer,
+ reinterpret_cast<byte_*>(instr));
+ PrintF(" 0x%08x %s\n", instr, buffer.start());
+ }
+
+ switch (instr->InstructionType()) {
+ case Instruction::kRegisterType:
+ DecodeTypeRegister(instr);
+ break;
+ case Instruction::kImmediateType:
+ DecodeTypeImmediate(instr);
+ break;
+ case Instruction::kJumpType:
+ DecodeTypeJump(instr);
+ break;
+ default:
+ UNSUPPORTED();
+ }
+ if (!pc_modified_) {
+ set_register(pc, reinterpret_cast<int32_t>(instr) +
+ Instruction::kInstructionSize);
+ }
+}
+
+
+
+void Simulator::Execute() {
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int program_counter = get_pc();
+ if (::v8::internal::FLAG_stop_sim_at == 0) {
+ // Fast version of the dispatch loop without checking whether the simulator
+ // should be stopping at a particular executed instruction.
+ while (program_counter != end_sim_pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ icount_++;
+ InstructionDecode(instr);
+ program_counter = get_pc();
+ }
+ } else {
+ // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
+ // we reach the particular instruction count.
+ while (program_counter != end_sim_pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ icount_++;
+ if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
+ Debugger dbg(this);
+ dbg.Debug();
+ } else {
+ InstructionDecode(instr);
+ }
+ program_counter = get_pc();
+ }
+ }
+}
+
+
+int32_t Simulator::Call(byte_* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+ // Set up arguments.
+
+ // First four arguments passed in registers.
+ ASSERT(argument_count >= 4);
+ set_register(a0, va_arg(parameters, int32_t));
+ set_register(a1, va_arg(parameters, int32_t));
+ set_register(a2, va_arg(parameters, int32_t));
+ set_register(a3, va_arg(parameters, int32_t));
+
+ // Remaining arguments passed on stack.
+ int original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
+ int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
+ - kArgsSlotsSize);
+ if (OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -OS::ActivationFrameAlignment();
+ }
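+ // ANDing with the negated alignment rounds the stack down to an aligned
+ // address. For example (hypothetical): with an 8-byte activation frame
+ // alignment, entry_stack &= -8 clears the low three bits, 0x7ffc -> 0x7ff8.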
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ for (int i = 4; i < argument_count; i++) {
+ stack_argument[i - 4 + kArgsSlotsNum] = va_arg(parameters, int32_t);
+ }
+ va_end(parameters);
+ set_register(sp, entry_stack);
+
+ // Prepare to execute the code at entry
+ set_register(pc, reinterpret_cast<int32_t>(entry));
+ // Put down marker for end of simulation. The simulator will stop simulation
+ // when the PC reaches this value. By saving the "end simulation" value into
+ // the ra register, the simulation stops when returning to this call point.
+ set_register(ra, end_sim_pc);
+
+ // Remember the values of callee-saved registers.
+ // On MIPS these are s0-s7, gp, sp, and fp (s8); they are saved here and
+ // checked again after the call returns.
+ int32_t s0_val = get_register(s0);
+ int32_t s1_val = get_register(s1);
+ int32_t s2_val = get_register(s2);
+ int32_t s3_val = get_register(s3);
+ int32_t s4_val = get_register(s4);
+ int32_t s5_val = get_register(s5);
+ int32_t s6_val = get_register(s6);
+ int32_t s7_val = get_register(s7);
+ int32_t gp_val = get_register(gp);
+ int32_t sp_val = get_register(sp);
+ int32_t fp_val = get_register(fp);
+
+ // Set up the callee-saved registers with a known value, to be able to check
+ // that they are preserved properly across JS execution.
+ int32_t callee_saved_value = icount_;
+ set_register(s0, callee_saved_value);
+ set_register(s1, callee_saved_value);
+ set_register(s2, callee_saved_value);
+ set_register(s3, callee_saved_value);
+ set_register(s4, callee_saved_value);
+ set_register(s5, callee_saved_value);
+ set_register(s6, callee_saved_value);
+ set_register(s7, callee_saved_value);
+ set_register(gp, callee_saved_value);
+ set_register(fp, callee_saved_value);
+
+ // Start the simulation
+ Execute();
+
+ // Check that the callee-saved registers have been preserved.
+ CHECK_EQ(callee_saved_value, get_register(s0));
+ CHECK_EQ(callee_saved_value, get_register(s1));
+ CHECK_EQ(callee_saved_value, get_register(s2));
+ CHECK_EQ(callee_saved_value, get_register(s3));
+ CHECK_EQ(callee_saved_value, get_register(s4));
+ CHECK_EQ(callee_saved_value, get_register(s5));
+ CHECK_EQ(callee_saved_value, get_register(s6));
+ CHECK_EQ(callee_saved_value, get_register(s7));
+ CHECK_EQ(callee_saved_value, get_register(gp));
+ CHECK_EQ(callee_saved_value, get_register(fp));
+
+ // Restore callee-saved registers with the original value.
+ set_register(s0, s0_val);
+ set_register(s1, s1_val);
+ set_register(s2, s2_val);
+ set_register(s3, s3_val);
+ set_register(s4, s4_val);
+ set_register(s5, s5_val);
+ set_register(s6, s6_val);
+ set_register(s7, s7_val);
+ set_register(gp, gp_val);
+ set_register(sp, sp_val);
+ set_register(fp, fp_val);
+
+ // Pop stack passed arguments.
+ CHECK_EQ(entry_stack, get_register(sp));
+ set_register(sp, original_stack);
+
+ int32_t result = get_register(v0);
+ return result;
+}
+
+
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+ int new_sp = get_register(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ set_register(sp, new_sp);
+ return new_sp;
+}
+
+
+uintptr_t Simulator::PopAddress() {
+ int current_sp = get_register(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ set_register(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+
+#undef UNSUPPORTED
+
+} } // namespace assembler::mips
+
+#endif // !defined(__mips)
+
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
new file mode 100644
index 000000000..d5dfc301a
--- /dev/null
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -0,0 +1,311 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Declares a Simulator for MIPS instructions if we are not generating a native
+// MIPS binary. This Simulator allows us to run and debug MIPS code generation
+// on regular desktop machines.
+// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// which will start execution in the Simulator or forward to the real entry
+// point on a MIPS hardware platform.
+
+#ifndef V8_MIPS_SIMULATOR_MIPS_H_
+#define V8_MIPS_SIMULATOR_MIPS_H_
+
+#include "allocation.h"
+
+#if defined(__mips)
+
+// When running without a simulator we call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ entry(p0, p1, p2, p3, p4);
+
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on mips uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ return c_limit;
+ }
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static inline void UnregisterCTryCatch() { }
+};
+
+// Calculates the stack limit beyond which we will throw stack overflow errors.
+// This macro must be called from a C++ method. It relies on being able to take
+// the address of "this" to get a value on the current execution stack and then
+// calculates the stack limit based on that value.
+// NOTE: The check for overflow is not safe as there is no guarantee that the
+// running thread has its stack in all memory up to address 0x00000000.
+#define GENERATED_CODE_STACK_LIMIT(limit) \
+ (reinterpret_cast<uintptr_t>(this) >= limit ? \
+ reinterpret_cast<uintptr_t>(this) - limit : 0)
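+// For example (hypothetical values): if "this" lives at 0x7fff0000 and limit
+// is 0x100000, the macro yields 0x7fff0000 - 0x100000 == 0x7fef0000; if
+// "this" were below limit, it conservatively yields 0.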
+
+// Call the generated regexp code directly. The entry function pointer should
+// expect seven int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+ entry(p0, p1, p2, p3, p4, p5, p6)
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ reinterpret_cast<TryCatch*>(try_catch_address)
+
+
+#else // #if defined(__mips)
+
+// When running with the simulator, transition into simulated execution at this
+// point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(\
+ assembler::mips::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
+ p0, p1, p2, p3, p4))
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+ assembler::mips::Simulator::current()->Call(\
+ FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ try_catch_address == NULL ? \
+ NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
+
+
+namespace assembler {
+namespace mips {
+
+class Simulator {
+ public:
+ friend class Debugger;
+
+ // Registers are declared in order. See SMRL chapter 2.
+ enum Register {
+ no_reg = -1,
+ zero_reg = 0,
+ at,
+ v0, v1,
+ a0, a1, a2, a3,
+ t0, t1, t2, t3, t4, t5, t6, t7,
+ s0, s1, s2, s3, s4, s5, s6, s7,
+ t8, t9,
+ k0, k1,
+ gp,
+ sp,
+ s8,
+ ra,
+ // LO, HI, and pc
+ LO,
+ HI,
+ pc, // pc must be the last register.
+ kNumSimuRegisters,
+ // aliases
+ fp = s8
+ };
+
+ // Coprocessor registers.
+ // Generated code will always use doubles, so we will only use even registers.
+ enum FPURegister {
+ f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
+ f12, f13, f14, f15, // f12 and f14 are argument FPURegisters.
+ f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
+ f26, f27, f28, f29, f30, f31,
+ kNumFPURegisters
+ };
+
+ Simulator();
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* current();
+
+ // Accessors for register state. Reading the pc value adheres to the MIPS
+ // architecture specification and is off by 8 from the currently executing
+ // instruction.
+ void set_register(int reg, int32_t value);
+ int32_t get_register(int reg) const;
+ // Same for FPURegisters
+ void set_fpu_register(int fpureg, int32_t value);
+ void set_fpu_register_double(int fpureg, double value);
+ int32_t get_fpu_register(int fpureg) const;
+ double get_fpu_register_double(int fpureg) const;
+
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int32_t value);
+ int32_t get_pc() const;
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t StackLimit() const;
+
+ // Executes MIPS instructions until the PC reaches end_sim_pc.
+ void Execute();
+
+ // Call on program start.
+ static void Initialize();
+
+ // V8 generally calls into generated JS code with 5 parameters and into
+ // generated RegExp code with 7 parameters. This is a convenience function,
+ // which sets up the simulator state and grabs the result on return.
+ int32_t Call(byte_* entry, int argument_count, ...);
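+ // For example, the CALL_GENERATED_CODE macro above expands to a call like
+ // Simulator::current()->Call(FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4),
+ // where the first four arguments land in a0-a3 and the rest on the stack.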
+
+ // Push an address onto the JS stack.
+ uintptr_t PushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t PopAddress();
+
+ private:
+ enum special_values {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly set up.
+ bad_ra = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the ra is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2,
+ // Unpredictable value.
+ Unpredictable = 0xbadbeaf
+ };
+
+ // Unsupported instructions use Format to print an error and stop execution.
+ void Format(Instruction* instr, const char* format);
+
+ // Read and write memory.
+ inline uint32_t ReadBU(int32_t addr);
+ inline int32_t ReadB(int32_t addr);
+ inline void WriteB(int32_t addr, uint8_t value);
+ inline void WriteB(int32_t addr, int8_t value);
+
+ inline uint16_t ReadHU(int32_t addr, Instruction* instr);
+ inline int16_t ReadH(int32_t addr, Instruction* instr);
+ // Note: Overloaded on the sign of the value.
+ inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
+ inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
+
+ inline int ReadW(int32_t addr, Instruction* instr);
+ inline void WriteW(int32_t addr, int value, Instruction* instr);
+
+ inline double ReadD(int32_t addr, Instruction* instr);
+ inline void WriteD(int32_t addr, double value, Instruction* instr);
+
+ // Operations depending on endianness.
+ // Get Double Higher / Lower word.
+ inline int32_t GetDoubleHIW(double* addr);
+ inline int32_t GetDoubleLOW(double* addr);
+ // Set Double Higher / Lower word.
+ inline int32_t SetDoubleHIW(double* addr);
+ inline int32_t SetDoubleLOW(double* addr);
+
+
+ // Executing is handled based on the instruction type.
+ void DecodeTypeRegister(Instruction* instr);
+ void DecodeTypeImmediate(Instruction* instr);
+ void DecodeTypeJump(Instruction* instr);
+
+ // Used for breakpoints and traps.
+ void SoftwareInterrupt(Instruction* instr);
+
+ // Executes one instruction.
+ void InstructionDecode(Instruction* instr);
+ // Execute one instruction placed in a branch delay slot.
+ void BranchDelayInstructionDecode(Instruction* instr) {
+ if (instr->IsForbiddenInBranchDelay()) {
+ V8_Fatal(__FILE__, __LINE__,
+ "Eror:Unexpected %i opcode in a branch delay slot.",
+ instr->OpcodeField());
+ }
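+ // E.g. a branch or jump placed in another branch's delay slot is
+ // architecturally unpredictable on MIPS, so the simulator aborts above
+ // rather than guessing at a behavior.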
+ InstructionDecode(instr);
+ }
+
+ enum Exception {
+ none,
+ kIntegerOverflow,
+ kIntegerUnderflow,
+ kDivideByZero,
+ kNumExceptions
+ };
+ int16_t exceptions[kNumExceptions];
+
+ // Exceptions.
+ void SignalExceptions();
+
+ // Runtime call support.
+ static void* RedirectExternalReference(void* external_function,
+ bool fp_return);
+
+ // Used for runtime calls that take two double values as arguments and
+ // return a double.
+ void SetFpResult(double result);
+
+ // Architecture state.
+ // Registers.
+ int32_t registers_[kNumSimuRegisters];
+ // Coprocessor Registers.
+ int32_t FPUregisters_[kNumFPURegisters];
+
+ // Simulator support.
+ char* stack_;
+ bool pc_modified_;
+ int icount_;
+ static bool initialized_;
+
+ // Registered breakpoints.
+ Instruction* break_pc_;
+ Instr break_instr_;
+};
+
+} } // namespace assembler::mips
+
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code. Setting the c_limit to indicate a very small
+// stack causes stack overflow errors, since the simulator ignores the input.
+// This is unlikely to be an issue in practice, though it might cause testing
+// trouble down the line.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ return assembler::mips::Simulator::current()->StackLimit();
+ }
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ assembler::mips::Simulator* sim = assembler::mips::Simulator::current();
+ return sim->PushAddress(try_catch_address);
+ }
+
+ static inline void UnregisterCTryCatch() {
+ assembler::mips::Simulator::current()->PopAddress();
+ }
+};
+
+#endif // defined(__mips)
+
+#endif // V8_MIPS_SIMULATOR_MIPS_H_
+
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
new file mode 100644
index 000000000..a87a49b73
--- /dev/null
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -0,0 +1,384 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ic-inl.h"
+#include "codegen-inl.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+ Code::Flags flags,
+ Register receiver,
+ Register name,
+ Register scratch,
+ Register extra) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+ int index,
+ Register prototype) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// Load a fast property out of a holder object (src). In-object properties
+// are loaded directly; otherwise the property is loaded from the properties
+// fixed array.
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst, Register src,
+ JSObject* holder, int index) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// Generate code to load the length from a string object and return the length.
+// If the receiver object is not a string or a wrapped string object, the
+// execution continues at the miss label. The register containing the
+// receiver is potentially clobbered.
+void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ UNIMPLEMENTED_MIPS();
+ __ break_(0x249);
+}
+
+
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// Generate StoreField code; the value is passed in the r0 register.
+// After executing generated code, the receiver_reg and name_reg
+// may be clobbered.
+void StubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Builtins::Name storage_extend,
+ JSObject* object,
+ int index,
+ Map* transition,
+ Register receiver_reg,
+ Register name_reg,
+ Register scratch,
+ Label* miss_label) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Register StubCompiler::CheckPrototypes(JSObject* object,
+ Register object_reg,
+ JSObject* holder,
+ Register holder_reg,
+ Register scratch,
+ String* name,
+ Label* miss) {
+ UNIMPLEMENTED_MIPS();
+ return at; // UNIMPLEMENTED RETURN
+}
+
+
+void StubCompiler::GenerateLoadField(JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ int index,
+ String* name,
+ Label* miss) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StubCompiler::GenerateLoadConstant(JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Object* value,
+ String* name,
+ Label* miss) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+bool StubCompiler::GenerateLoadCallback(JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ AccessorInfo* callback,
+ String* name,
+ Label* miss,
+ Failure** failure) {
+ UNIMPLEMENTED_MIPS();
+ __ break_(0x470);
+ return false; // UNIMPLEMENTED RETURN
+}
+
+
+void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+ JSObject* holder,
+ LookupResult* lookup,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ String* name,
+ Label* miss) {
+ UNIMPLEMENTED_MIPS();
+ __ break_(0x505);
+}
+
+
+Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* CallStubCompiler::CompileCallField(Object* object,
+ JSObject* holder,
+ int index,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* CallStubCompiler::CompileCallConstant(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+ JSObject* holder,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ __ break_(0x782);
+ return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* StoreStubCompiler::CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+ AccessorInfo* callback,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ __ break_(0x906);
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
+ JSGlobalPropertyCell* cell,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* LoadStubCompiler::CompileLoadField(JSObject* object,
+ JSObject* holder,
+ int index,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* LoadStubCompiler::CompileLoadCallback(String* name,
+ JSObject* object,
+ JSObject* holder,
+ AccessorInfo* callback) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+ JSObject* holder,
+ Object* value,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
+ JSObject* holder,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ String* name,
+ bool is_dont_delete) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int index) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ AccessorInfo* callback) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ Object* value) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+ JSObject* holder,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+// TODO(1224671): implement the fast case.
+Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* ConstructStubCompiler::CompileConstructStub(
+ SharedFunctionInfo* shared) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
diff --git a/deps/v8/src/mips/virtual-frame-mips.cc b/deps/v8/src/mips/virtual-frame-mips.cc
new file mode 100644
index 000000000..fad7ec4c7
--- /dev/null
+++ b/deps/v8/src/mips/virtual-frame-mips.cc
@@ -0,0 +1,240 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// VirtualFrame implementation.
+
+#define __ ACCESS_MASM(masm())
+
+
+// On entry to a function, the virtual frame already contains the
+// receiver and the parameters. All initial frame elements are in
+// memory.
+VirtualFrame::VirtualFrame()
+ : elements_(parameter_count() + local_count() + kPreallocatedElements),
+ stack_pointer_(parameter_count()) { // 0-based index of TOS.
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::SyncElementBelowStackPointer(int index) {
+ UNREACHABLE();
+}
+
+
+void VirtualFrame::SyncElementByPushing(int index) {
+ UNREACHABLE();
+}
+
+
+void VirtualFrame::SyncRange(int begin, int end) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::Enter() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::Exit() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::AllocateStackSlots() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::SaveContextRegister() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::RestoreContextRegister() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::PushReceiverSlotAddress() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+int VirtualFrame::InvalidateFrameSlotAt(int index) {
+ return kIllegalIndex;
+}
+
+
+void VirtualFrame::TakeFrameSlotAt(int index) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::StoreToFrameSlotAt(int index) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::PushTryHandler(HandlerType type) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::RawCallStub(CodeStub* stub) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallAlignedRuntime(Runtime::Function* f, int arg_count) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallAlignedRuntime(Runtime::FunctionId id, int arg_count) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeJSFlags flags,
+ Result* arg_count_register,
+ int arg_count) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::RawCallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ int dropped_args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ Result* arg,
+ int dropped_args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ Result* arg0,
+ Result* arg1,
+ int dropped_args,
+ bool set_auto_args_slots) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::Drop(int count) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::DropFromVFrameOnly(int count) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+Result VirtualFrame::Pop() {
+ UNIMPLEMENTED_MIPS();
+ Result res = Result();
+ return res; // UNIMPLEMENTED RETURN
+}
+
+
+void VirtualFrame::EmitPop(Register reg) {
+ UNIMPLEMENTED_MIPS();
+}
+
+void VirtualFrame::EmitMultiPop(RegList regs) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::EmitPush(Register reg) {
+ UNIMPLEMENTED_MIPS();
+}
+
+void VirtualFrame::EmitMultiPush(RegList regs) {
+ UNIMPLEMENTED_MIPS();
+}
+
+void VirtualFrame::EmitArgumentSlots(RegList reglist) {
+ UNIMPLEMENTED_MIPS();
+}
+
+#undef __
+
+} } // namespace v8::internal
+
diff --git a/deps/v8/src/mips/virtual-frame-mips.h b/deps/v8/src/mips/virtual-frame-mips.h
new file mode 100644
index 000000000..79f973fb6
--- /dev/null
+++ b/deps/v8/src/mips/virtual-frame-mips.h
@@ -0,0 +1,548 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_MIPS_VIRTUAL_FRAME_MIPS_H_
+#define V8_MIPS_VIRTUAL_FRAME_MIPS_H_
+
+#include "register-allocator.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+
+// -------------------------------------------------------------------------
+// Virtual frames
+//
+// The virtual frame is an abstraction of the physical stack frame. It
+// encapsulates the parameters, frame-allocated locals, and the expression
+// stack. It supports push/pop operations on the expression stack, as well
+// as random access to the expression stack elements, locals, and
+// parameters.
+
+class VirtualFrame : public ZoneObject {
+ public:
+ // A utility class to introduce a scope where the virtual frame is
+ // expected to remain spilled. The constructor spills the code
+ // generator's current frame, but no attempt is made to require it
+ // to stay spilled. It is intended as documentation while the code
+ // generator is being transformed.
+ class SpilledScope BASE_EMBEDDED {
+ public:
+ SpilledScope() {}
+ };
+
+ // An illegal index into the virtual frame.
+ static const int kIllegalIndex = -1;
+
+ // Construct an initial virtual frame on entry to a JS function.
+ VirtualFrame();
+
+ // Construct a virtual frame as a clone of an existing one.
+ explicit VirtualFrame(VirtualFrame* original);
+
+ CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+ MacroAssembler* masm() { return cgen()->masm(); }
+
+ // Create a duplicate of an existing valid frame element.
+ FrameElement CopyElementAt(int index);
+
+ // The number of elements on the virtual frame.
+ int element_count() { return elements_.length(); }
+
+ // The height of the virtual expression stack.
+ int height() {
+ return element_count() - expression_base_index();
+ }
+
+ int register_location(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num];
+ }
+
+ int register_location(Register reg) {
+ return register_locations_[RegisterAllocator::ToNumber(reg)];
+ }
+
+ void set_register_location(Register reg, int index) {
+ register_locations_[RegisterAllocator::ToNumber(reg)] = index;
+ }
+
+ bool is_used(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num] != kIllegalIndex;
+ }
+
+ bool is_used(Register reg) {
+ return register_locations_[RegisterAllocator::ToNumber(reg)]
+ != kIllegalIndex;
+ }
+
+ // Add extra in-memory elements to the top of the frame to match an actual
+ // frame (eg, the frame after an exception handler is pushed). No code is
+ // emitted.
+ void Adjust(int count);
+
+ // Forget elements from the top of the frame to match an actual frame (eg,
+ // the frame after a runtime call). No code is emitted.
+ void Forget(int count) {
+ ASSERT(count >= 0);
+ ASSERT(stack_pointer_ == element_count() - 1);
+ stack_pointer_ -= count;
+ // On MIPS, all elements are in memory, so there is no extra bookkeeping
+ // (registers, copies, etc.) beyond dropping the elements.
+ elements_.Rewind(stack_pointer_ + 1);
+ }
+
+ // Forget count elements from the top of the frame and adjust the stack
+ // pointer downward. This is used, for example, before merging frames at
+ // break, continue, and return targets.
+ void ForgetElements(int count);
+
+ // Spill all values from the frame to memory.
+ void SpillAll();
+
+ // Spill all occurrences of a specific register from the frame.
+ void Spill(Register reg) {
+ if (is_used(reg)) SpillElementAt(register_location(reg));
+ }
+
+ // Spill all occurrences of an arbitrary register if possible. Return the
+ // register spilled or no_reg if it was not possible to free any register
+ // (ie, they all have frame-external references).
+ Register SpillAnyRegister();
+
+ // Prepare this virtual frame for merging to an expected frame by
+ // performing some state changes that do not require generating
+ // code. It is guaranteed that no code will be generated.
+ void PrepareMergeTo(VirtualFrame* expected);
+
+ // Make this virtual frame have a state identical to an expected virtual
+ // frame. As a side effect, code may be emitted to make this frame match
+ // the expected one.
+ void MergeTo(VirtualFrame* expected);
+
+ // Detach a frame from its code generator, perhaps temporarily. This
+ // tells the register allocator that it is free to use frame-internal
+ // registers. Used when the code generator's frame is switched from this
+ // one to NULL by an unconditional jump.
+ void DetachFromCodeGenerator() {
+ RegisterAllocator* cgen_allocator = cgen()->allocator();
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Unuse(i);
+ }
+ }
+
+ // (Re)attach a frame to its code generator. This informs the register
+ // allocator that the frame-internal register references are active again.
+ // Used when a code generator's frame is switched from NULL to this one by
+ // binding a label.
+ void AttachToCodeGenerator() {
+ RegisterAllocator* cgen_allocator = cgen()->allocator();
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Use(i);
+ }
+ }
+
+ // Emit code for the physical JS entry and exit frame sequences. After
+ // calling Enter, the virtual frame is ready for use; and after calling
+ // Exit it should not be used. Note that Enter does not allocate space in
+ // the physical frame for storing frame-allocated locals.
+ void Enter();
+ void Exit();
+
+ // Prepare for returning from the frame by spilling locals and
+ // dropping all non-locals elements in the virtual frame. This
+ // avoids generating unnecessary merge code when jumping to the
+ // shared return site. Emits code for spills.
+ void PrepareForReturn();
+
+ // Allocate and initialize the frame-allocated locals.
+ void AllocateStackSlots();
+
+ // The current top of the expression stack as an assembly operand.
+ MemOperand Top() { return MemOperand(sp, 0); }
+
+ // An element of the expression stack as an assembly operand.
+ MemOperand ElementAt(int index) {
+ return MemOperand(sp, index * kPointerSize);
+ }
+
+ // Random-access store to a frame-top relative frame element. The result
+ // becomes owned by the frame and is invalidated.
+ void SetElementAt(int index, Result* value);
+
+ // Set a frame element to a constant. The index is frame-top relative.
+ void SetElementAt(int index, Handle<Object> value) {
+ Result temp(value);
+ SetElementAt(index, &temp);
+ }
+
+ void PushElementAt(int index) {
+ PushFrameSlotAt(element_count() - index - 1);
+ }
+
+ // A frame-allocated local as an assembly operand.
+ MemOperand LocalAt(int index) {
+ ASSERT(0 <= index);
+ ASSERT(index < local_count());
+ return MemOperand(s8_fp, kLocal0Offset - index * kPointerSize);
+ }
+
+ // Push a copy of the value of a local frame slot on top of the frame.
+ void PushLocalAt(int index) {
+ PushFrameSlotAt(local0_index() + index);
+ }
+
+ // Push the value of a local frame slot on top of the frame and invalidate
+ // the local slot. The slot should be written to before trying to read
+ // from it again.
+ void TakeLocalAt(int index) {
+ TakeFrameSlotAt(local0_index() + index);
+ }
+
+ // Store the top value on the virtual frame into a local frame slot. The
+ // value is left in place on top of the frame.
+ void StoreToLocalAt(int index) {
+ StoreToFrameSlotAt(local0_index() + index);
+ }
+
+ // Push the address of the receiver slot on the frame.
+ void PushReceiverSlotAddress();
+
+ // The function frame slot.
+ MemOperand Function() { return MemOperand(s8_fp, kFunctionOffset); }
+
+ // Push the function on top of the frame.
+ void PushFunction() { PushFrameSlotAt(function_index()); }
+
+ // The context frame slot.
+ MemOperand Context() { return MemOperand(s8_fp, kContextOffset); }
+
+ // Save the value of the cp register to the context frame slot.
+ void SaveContextRegister();
+
+ // Restore the cp register from the value of the context frame
+ // slot.
+ void RestoreContextRegister();
+
+ // A parameter as an assembly operand.
+ MemOperand ParameterAt(int index) {
+ // Index -1 corresponds to the receiver.
+ ASSERT(-1 <= index); // -1 is the receiver.
+ ASSERT(index <= parameter_count());
+ uint16_t a = 0; // Number of argument slots.
+ return MemOperand(s8_fp, (1 + parameter_count() + a - index) * kPointerSize);
+ }
+
+ // Push a copy of the value of a parameter frame slot on top of the frame.
+ void PushParameterAt(int index) {
+ PushFrameSlotAt(param0_index() + index);
+ }
+
+ // Push the value of a parameter frame slot on top of the frame and
+ // invalidate the parameter slot. The slot should be written to before
+ // trying to read from it again.
+ void TakeParameterAt(int index) {
+ TakeFrameSlotAt(param0_index() + index);
+ }
+
+ // Store the top value on the virtual frame into a parameter frame slot.
+ // The value is left in place on top of the frame.
+ void StoreToParameterAt(int index) {
+ StoreToFrameSlotAt(param0_index() + index);
+ }
+
+ // The receiver frame slot.
+ MemOperand Receiver() { return ParameterAt(-1); }
+
+ // Push a try-catch or try-finally handler on top of the virtual frame.
+ void PushTryHandler(HandlerType type);
+
+ // Call stub given the number of arguments it expects on (and
+ // removes from) the stack.
+ void CallStub(CodeStub* stub, int arg_count) {
+ PrepareForCall(arg_count, arg_count);
+ RawCallStub(stub);
+ }
+
+ // Call stub that expects its argument in r0. The argument is given
+ // as a result which must be the register r0.
+ void CallStub(CodeStub* stub, Result* arg);
+
+ // Call stub that expects its arguments in r1 and r0. The arguments
+ // are given as results which must be the appropriate registers.
+ void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
+
+ // Call runtime given the number of arguments expected on (and
+ // removed from) the stack.
+ void CallRuntime(Runtime::Function* f, int arg_count);
+ void CallRuntime(Runtime::FunctionId id, int arg_count);
+
+ // Call runtime with sp aligned to 8 bytes.
+ void CallAlignedRuntime(Runtime::Function* f, int arg_count);
+ void CallAlignedRuntime(Runtime::FunctionId id, int arg_count);
+
+ // Invoke builtin given the number of arguments it expects on (and
+ // removes from) the stack.
+ void InvokeBuiltin(Builtins::JavaScript id,
+ InvokeJSFlags flag,
+ Result* arg_count_register,
+ int arg_count);
+
+ // Call into an IC stub given the number of arguments it removes
+ // from the stack. Register arguments are passed as results and
+ // consumed by the call.
+ void CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ int dropped_args);
+ void CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ Result* arg,
+ int dropped_args);
+ void CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ Result* arg0,
+ Result* arg1,
+ int dropped_args,
+ bool set_auto_args_slots = false);
+
+ // Drop a number of elements from the top of the expression stack. May
+ // emit code to affect the physical frame. Does not clobber any registers
+ // excepting possibly the stack pointer.
+ void Drop(int count);
+ // Similar to VirtualFrame::Drop but we don't modify the actual stack.
+ // This is because we need to manually restore sp to the correct position.
+ void DropFromVFrameOnly(int count);
+
+ // Drop one element.
+ void Drop() { Drop(1); }
+ void DropFromVFrameOnly() { DropFromVFrameOnly(1); }
+
+ // Duplicate the top element of the frame.
+ void Dup() { PushFrameSlotAt(element_count() - 1); }
+
+ // Pop an element from the top of the expression stack. Returns a
+ // Result, which may be a constant or a register.
+ Result Pop();
+
+ // Pop and save an element from the top of the expression stack and
+ // emit a corresponding pop instruction.
+ void EmitPop(Register reg);
+ // Same but for multiple registers
+ void EmitMultiPop(RegList regs); // higher indexed registers popped first
+ void EmitMultiPopReversed(RegList regs); // lower first
+
+ // Push an element on top of the expression stack and emit a
+ // corresponding push instruction.
+ void EmitPush(Register reg);
+ // Same but for multiple registers.
+ void EmitMultiPush(RegList regs); // lower indexed registers are pushed first
+ void EmitMultiPushReversed(RegList regs); // higher first
+
+ // Push an element on the virtual frame.
+ void Push(Register reg);
+ void Push(Handle<Object> value);
+ void Push(Smi* value) { Push(Handle<Object>(value)); }
+
+ // Pushing a result invalidates it (its contents become owned by the frame).
+ void Push(Result* result) {
+ if (result->is_register()) {
+ Push(result->reg());
+ } else {
+ ASSERT(result->is_constant());
+ Push(result->handle());
+ }
+ result->Unuse();
+ }
+
+ // Nip removes zero or more elements from immediately below the top
+ // of the frame, leaving the previous top-of-frame value on top of
+ // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
+ void Nip(int num_dropped);
+
+ // This pushes four argument slots on the stack and saves the requested 'a'
+ // registers ('a' registers are the argument registers a0 to a3).
+ void EmitArgumentSlots(RegList reglist);
+
+ private:
+ static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
+ static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
+ static const int kContextOffset = StandardFrameConstants::kContextOffset;
+
+ static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
+ static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
+
+ ZoneList<FrameElement> elements_;
+
+ // The index of the element that is at the processor's stack pointer
+ // (the sp register).
+ int stack_pointer_;
+
+ // The index of the register frame element using each register, or
+ // kIllegalIndex if a register is not on the frame.
+ int register_locations_[RegisterAllocator::kNumRegisters];
+
+ // The number of frame-allocated locals and parameters respectively.
+ int parameter_count() { return cgen()->scope()->num_parameters(); }
+ int local_count() { return cgen()->scope()->num_stack_slots(); }
+
+ // The index of the element that is at the processor's frame pointer
+ // (the fp register). The parameters, receiver, function, and context
+ // are below the frame pointer.
+ int frame_pointer() { return parameter_count() + 3; }
+
+ // The index of the first parameter. The receiver lies below the first
+ // parameter.
+ int param0_index() { return 1; }
+
+ // The index of the context slot in the frame. It is immediately
+ // below the frame pointer.
+ int context_index() { return frame_pointer() - 1; }
+
+ // The index of the function slot in the frame. It is below the frame
+ // pointer and context slot.
+ int function_index() { return frame_pointer() - 2; }
+
+ // The index of the first local. Between the frame pointer and the
+ // locals lies the return address.
+ int local0_index() { return frame_pointer() + 2; }
+
+ // The index of the base of the expression stack.
+ int expression_base_index() { return local0_index() + local_count(); }
+
+ // Convert a frame index into a frame pointer relative offset into the
+ // actual stack.
+ int fp_relative(int index) {
+ ASSERT(index < element_count());
+ ASSERT(frame_pointer() < element_count()); // FP is on the frame.
+ return (frame_pointer() - index) * kPointerSize;
+ }
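+ // For example (hypothetical values): with parameter_count() == 2,
+ // frame_pointer() == 5 and fp_relative(context_index()) == (5 - 4) *
+ // kPointerSize, placing the context slot one word below the frame pointer.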
+
+ // Record an occurrence of a register in the virtual frame. This has the
+ // effect of incrementing the register's external reference count and
+ // of updating the index of the register's location in the frame.
+ void Use(Register reg, int index) {
+ ASSERT(!is_used(reg));
+ set_register_location(reg, index);
+ cgen()->allocator()->Use(reg);
+ }
+
+ // Record that a register reference has been dropped from the frame. This
+ // decrements the register's external reference count and invalidates the
+ // index of the register's location in the frame.
+ void Unuse(Register reg) {
+ ASSERT(is_used(reg));
+ set_register_location(reg, kIllegalIndex);
+ cgen()->allocator()->Unuse(reg);
+ }
+
+ // Spill the element at a particular index---write it to memory if
+ // necessary, free any associated register, and forget its value if
+ // constant.
+ void SpillElementAt(int index);
+
+ // Sync the element at a particular index. If it is a register or
+ // constant that disagrees with the value on the stack, write it to memory.
+ // Keep the element type as register or constant, and clear the dirty bit.
+ void SyncElementAt(int index);
+
+ // Sync the range of elements in [begin, end] with memory.
+ void SyncRange(int begin, int end);
+
+ // Sync a single unsynced element that lies beneath or at the stack pointer.
+ void SyncElementBelowStackPointer(int index);
+
+ // Sync a single unsynced element that lies just above the stack pointer.
+ void SyncElementByPushing(int index);
+
+ // Push a copy of a frame slot (typically a local or parameter) on top of
+ // the frame.
+ void PushFrameSlotAt(int index);
+
+ // Push the value of a frame slot (typically a local or parameter) on
+ // top of the frame and invalidate the slot.
+ void TakeFrameSlotAt(int index);
+
+ // Store the value on top of the frame to a frame slot (typically a local
+ // or parameter).
+ void StoreToFrameSlotAt(int index);
+
+ // Spill all elements in registers. Spill the top spilled_args elements
+ // on the frame. Sync all other frame elements.
+ // Then drop dropped_args elements from the virtual frame, to match
+ // the effect of an upcoming call that will drop them from the stack.
+ void PrepareForCall(int spilled_args, int dropped_args);
+
+ // Move frame elements currently in registers or constants, that
+ // should be in memory in the expected frame, to memory.
+ void MergeMoveRegistersToMemory(VirtualFrame* expected);
+
+ // Make the register-to-register moves necessary to
+ // merge this frame with the expected frame.
+ // Register to memory moves must already have been made,
+ // and memory to register moves must follow this call.
+ // This is because some new memory-to-register moves are
+ // created in order to break cycles of register moves.
+ // Used in the implementation of MergeTo().
+ void MergeMoveRegistersToRegisters(VirtualFrame* expected);
+
+ // Make the memory-to-register and constant-to-register moves
+ // needed to make this frame equal the expected frame.
+ // Called after all register-to-memory and register-to-register
+ // moves have been made. After this function returns, the frames
+ // should be equal.
+ void MergeMoveMemoryToRegisters(VirtualFrame* expected);
+
+ // Invalidates a frame slot (puts an invalid frame element in it).
+ // Copies on the frame are correctly handled, and if this slot was
+ // the backing store of copies, the index of the new backing store
+ // is returned. Otherwise, returns kIllegalIndex.
+ // Register counts are correctly updated.
+ int InvalidateFrameSlotAt(int index);
+
+ // Call a code stub that has already been prepared for calling (via
+ // PrepareForCall).
+ void RawCallStub(CodeStub* stub);
+
+ // Calls a code object which has already been prepared for calling
+ // (via PrepareForCall).
+ void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
+
+ bool Equals(VirtualFrame* other);
+
+ // Classes that need raw access to the elements_ array.
+ friend class DeferredCode;
+ friend class JumpTarget;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS_VIRTUAL_FRAME_MIPS_H_
+
diff --git a/deps/v8/src/mirror-delay.js b/deps/v8/src/mirror-delay.js
index 1487ce57c..e1bedfdff 100644
--- a/deps/v8/src/mirror-delay.js
+++ b/deps/v8/src/mirror-delay.js
@@ -1733,7 +1733,8 @@ ScriptMirror.prototype.value = function() {
ScriptMirror.prototype.name = function() {
- return this.script_.name;
+ // If we have a name, we trust it more than a sourceURL from comments.
+ return this.script_.name || this.sourceUrlFromComment_();
};
@@ -1829,6 +1830,29 @@ ScriptMirror.prototype.toText = function() {
/**
+ * Returns a suggested script URL from comments in script code (if found),
+ * undefined otherwise. Used primarily by debuggers for identifying eval()'ed
+ * scripts. See
+ * http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt
+ * for details.
+ *
+ * @return {?string} value for //@ sourceURL comment
+ */
+ScriptMirror.prototype.sourceUrlFromComment_ = function() {
+ if (!('sourceUrl_' in this) && this.source()) {
+ // TODO(608): the spaces in a regexp below had to be escaped as \040
+ // because this file is being processed by js2c whose handling of spaces
+ // in regexps is broken.
+ // We're not using \s here to prevent \n from matching.
+ var sourceUrlPattern = /\/\/@[\040\t]sourceURL=[\040\t]*(\S+)[\040\t]*$/m;
+ var match = sourceUrlPattern.exec(this.source());
+ this.sourceUrl_ = match ? match[1] : undefined;
+ }
+ return this.sourceUrl_;
+};
+
+
+/**
* Mirror object for context.
* @param {Object} data The context data
* @constructor
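
The sourceURL scan above runs at most once per script (the result is cached on the mirror) and deliberately avoids \s so the match cannot cross line boundaries. A rough standalone C++ sketch of the same extraction, assuming C++17 for std::regex::multiline; the names are illustrative, not V8 API:

    #include <optional>
    #include <regex>
    #include <string>

    // Extracts the value of a trailing "//@ sourceURL=..." comment. Like the
    // mirror's pattern, it never uses \s, so a match cannot span newlines.
    std::optional<std::string> SourceUrlFromComment(const std::string& source) {
      static const std::regex kPattern(
          "//@[ \\t]sourceURL=[ \\t]*(\\S+)[ \\t]*$", std::regex::multiline);
      std::smatch match;
      if (std::regex_search(source, match, kPattern)) {
        return match[1].str();  // The captured URL token.
      }
      return std::nullopt;  // No sourceURL comment present.
    }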
diff --git a/deps/v8/src/number-info.h b/deps/v8/src/number-info.h
new file mode 100644
index 000000000..c6f32e47c
--- /dev/null
+++ b/deps/v8/src/number-info.h
@@ -0,0 +1,72 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_NUMBER_INFO_H_
+#define V8_NUMBER_INFO_H_
+
+namespace v8 {
+namespace internal {
+
+class NumberInfo : public AllStatic {
+ public:
+ enum Type {
+ kUnknown = 0,
+ kNumber = 1,
+ kSmi = 3,
+ kHeapNumber = 5,
+ kUninitialized = 7
+ };
+
+ // Return the weakest (least precise) common type.
+ static Type Combine(Type a, Type b) {
+ // Make use of the order of enum values.
+ return static_cast<Type>(a & b);
+ }
+
+ static bool IsNumber(Type a) {
+ ASSERT(a != kUninitialized);
+ return ((a & kNumber) != 0);
+ }
+
+ static const char* ToString(Type a) {
+ switch (a) {
+ case kUnknown: return "UnknownType";
+ case kNumber: return "NumberType";
+ case kSmi: return "SmiType";
+ case kHeapNumber: return "HeapNumberType";
+ case kUninitialized:
+ UNREACHABLE();
+ return "UninitializedType";
+ }
+ UNREACHABLE();
+ return "Unreachable code";
+ }
+};
+
+} } // namespace v8::internal
+
+#endif // V8_NUMBER_INFO_H_
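
The enum values in number-info.h are chosen so that bitwise AND computes the weakest common type: kSmi (011) and kHeapNumber (101) share only the kNumber bit (001), kUnknown (000) absorbs everything, and kUninitialized (111) is the neutral element. A small self-contained check of that lattice, outside of V8:

    #include <cassert>

    // Mirrors NumberInfo::Type: one "is a number" bit shared by the two
    // more precise types, so AND walks down the lattice.
    enum Type {
      kUnknown = 0,        // 000
      kNumber = 1,         // 001
      kSmi = 3,            // 011 = kNumber | 2
      kHeapNumber = 5,     // 101 = kNumber | 4
      kUninitialized = 7   // 111: AND keeps the other operand's bits
    };

    int main() {
      assert((kSmi & kHeapNumber) == kNumber);   // Two numbers, kind unknown.
      assert((kSmi & kSmi) == kSmi);             // Combining equals: identity.
      assert((kSmi & kUnknown) == kUnknown);     // Unknown absorbs everything.
      assert((kHeapNumber & kUninitialized) == kHeapNumber);  // Neutral.
      return 0;
    }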
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 7e77e8164..9415bc1a1 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -1036,6 +1036,8 @@ void FunctionTemplateInfo::FunctionTemplateInfoVerify() {
void FunctionTemplateInfo::FunctionTemplateInfoPrint() {
HeapObject::PrintHeader("FunctionTemplateInfo");
+ PrintF("\n - class name: ");
+ class_name()->ShortPrint();
PrintF("\n - tag: ");
tag()->ShortPrint();
PrintF("\n - property_list: ");
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 4355fe9e1..455a84c8d 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -2773,6 +2773,13 @@ bool JSObject::HasIndexedInterceptor() {
}
+bool JSObject::AllowsSetElementsLength() {
+ bool result = elements()->IsFixedArray();
+ ASSERT(result == (!HasPixelElements() && !HasExternalArrayElements()));
+ return result;
+}
+
+
StringDictionary* JSObject::property_dictionary() {
ASSERT(!HasFastProperties());
return StringDictionary::cast(properties());
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 6dd1d4924..53423af52 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -219,7 +219,7 @@ Object* JSObject::GetPropertyWithFailedAccessCheck(
LookupResult* result,
String* name,
PropertyAttributes* attributes) {
- if (result->IsValid()) {
+ if (result->IsProperty()) {
switch (result->type()) {
case CALLBACKS: {
// Only allow API accessors.
@@ -242,7 +242,7 @@ Object* JSObject::GetPropertyWithFailedAccessCheck(
// Search ALL_CAN_READ accessors in prototype chain.
LookupResult r;
result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
- if (r.IsValid()) {
+ if (r.IsProperty()) {
return GetPropertyWithFailedAccessCheck(receiver,
&r,
name,
@@ -255,16 +255,16 @@ Object* JSObject::GetPropertyWithFailedAccessCheck(
// No access check in GetPropertyAttributeWithInterceptor.
LookupResult r;
result->holder()->LookupRealNamedProperty(name, &r);
- if (r.IsValid()) {
+ if (r.IsProperty()) {
return GetPropertyWithFailedAccessCheck(receiver,
&r,
name,
attributes);
}
- }
- default: {
break;
}
+ default:
+ UNREACHABLE();
}
}
@@ -280,7 +280,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
LookupResult* result,
String* name,
bool continue_search) {
- if (result->IsValid()) {
+ if (result->IsProperty()) {
switch (result->type()) {
case CALLBACKS: {
// Only allow API accessors.
@@ -301,7 +301,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
// Search ALL_CAN_READ accessors in prototype chain.
LookupResult r;
result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
- if (r.IsValid()) {
+ if (r.IsProperty()) {
return GetPropertyAttributeWithFailedAccessCheck(receiver,
&r,
name,
@@ -319,7 +319,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
} else {
result->holder()->LocalLookupRealNamedProperty(name, &r);
}
- if (r.IsValid()) {
+ if (r.IsProperty()) {
return GetPropertyAttributeWithFailedAccessCheck(receiver,
&r,
name,
@@ -328,9 +328,8 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
break;
}
- default: {
- break;
- }
+ default:
+ UNREACHABLE();
}
}
@@ -505,7 +504,7 @@ Object* Object::GetProperty(Object* receiver,
// holder will always be the interceptor holder and the search may
// only continue with a current object just after the interceptor
// holder in the prototype chain.
- Object* last = result->IsValid() ? result->holder() : Heap::null_value();
+ Object* last = result->IsProperty() ? result->holder() : Heap::null_value();
for (Object* current = this; true; current = current->GetPrototype()) {
if (current->IsAccessCheckNeeded()) {
// Check if we're allowed to read from the current object. Note
@@ -1463,8 +1462,12 @@ Object* JSObject::SetPropertyPostInterceptor(String* name,
// Check local property, ignore interceptor.
LookupResult result;
LocalLookupRealNamedProperty(name, &result);
- if (result.IsValid()) return SetProperty(&result, name, value, attributes);
- // Add real property.
+ if (result.IsFound()) {
+ // An existing property, a map transition or a null descriptor was
+ // found. Use set property to handle all these cases.
+ return SetProperty(&result, name, value, attributes);
+ }
+ // Add a new real property.
return AddProperty(name, value, attributes);
}
@@ -1696,8 +1699,8 @@ void JSObject::LookupCallbackSetterInPrototypes(String* name,
pt != Heap::null_value();
pt = pt->GetPrototype()) {
JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
- if (result->IsValid()) {
- if (!result->IsTransitionType() && result->IsReadOnly()) {
+ if (result->IsProperty()) {
+ if (result->IsReadOnly()) {
result->NotFound();
return;
}
@@ -1758,7 +1761,11 @@ void JSObject::LocalLookupRealNamedProperty(String* name,
if (HasFastProperties()) {
LookupInDescriptor(name, result);
- if (result->IsValid()) {
+ if (result->IsFound()) {
+ // A property, a map transition or a null descriptor was found.
+ // We return all of these result types because
+ // LocalLookupRealNamedProperty is used when setting properties
+ // where map transitions and null descriptors are handled.
ASSERT(result->holder() == this && result->type() != NORMAL);
// Disallow caching for uninitialized constants. These can only
// occur as fields.
@@ -1808,16 +1815,7 @@ void JSObject::LookupRealNamedPropertyInPrototypes(String* name,
pt != Heap::null_value();
pt = JSObject::cast(pt)->GetPrototype()) {
JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
- if (result->IsValid()) {
- switch (result->type()) {
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case CALLBACKS:
- return;
- default: break;
- }
- }
+ if (result->IsProperty() && (result->type() != INTERCEPTOR)) return;
}
result->NotFound();
}
@@ -1903,14 +1901,15 @@ Object* JSObject::SetProperty(LookupResult* result,
// accessor that wants to handle the property.
LookupResult accessor_result;
LookupCallbackSetterInPrototypes(name, &accessor_result);
- if (accessor_result.IsValid()) {
+ if (accessor_result.IsProperty()) {
return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
name,
value,
accessor_result.holder());
}
}
- if (result->IsNotFound()) {
+ if (!result->IsFound()) {
+ // Neither properties nor transitions found.
return AddProperty(name, value, attributes);
}
if (!result->IsLoaded()) {
@@ -1972,15 +1971,12 @@ Object* JSObject::IgnoreAttributesAndSetLocalProperty(
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
- // ADDED TO CLONE
- LookupResult result_struct;
- LocalLookup(name, &result_struct);
- LookupResult* result = &result_struct;
- // END ADDED TO CLONE
+ LookupResult result;
+ LocalLookup(name, &result);
// Check access rights if needed.
if (IsAccessCheckNeeded()
- && !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(result, name, value);
+ && !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
+ return SetPropertyWithFailedAccessCheck(&result, name, value);
}
if (IsJSGlobalProxy()) {
@@ -1994,31 +1990,34 @@ Object* JSObject::IgnoreAttributesAndSetLocalProperty(
}
// Check for accessor in prototype chain removed here in clone.
- if (result->IsNotFound()) {
+ if (!result.IsFound()) {
+ // Neither properties nor transitions found.
return AddProperty(name, value, attributes);
}
- if (!result->IsLoaded()) {
- return SetLazyProperty(result, name, value, attributes);
+ if (!result.IsLoaded()) {
+ return SetLazyProperty(&result, name, value, attributes);
}
+ PropertyDetails details = PropertyDetails(attributes, NORMAL);
+
// Check of IsReadOnly removed from here in clone.
- switch (result->type()) {
+ switch (result.type()) {
case NORMAL:
- return SetNormalizedProperty(result, value);
+ return SetNormalizedProperty(name, value, details);
case FIELD:
- return FastPropertyAtPut(result->GetFieldIndex(), value);
+ return FastPropertyAtPut(result.GetFieldIndex(), value);
case MAP_TRANSITION:
- if (attributes == result->GetAttributes()) {
+ if (attributes == result.GetAttributes()) {
// Only use map transition if the attributes match.
- return AddFastPropertyUsingMap(result->GetTransitionMap(),
+ return AddFastPropertyUsingMap(result.GetTransitionMap(),
name,
value);
}
return ConvertDescriptorToField(name, value, attributes);
case CONSTANT_FUNCTION:
// Only replace the function if necessary.
- if (value == result->GetConstantFunction()) return value;
+ if (value == result.GetConstantFunction()) return value;
// Preserve the attributes of this existing property.
- attributes = result->GetAttributes();
+ attributes = result.GetAttributes();
return ConvertDescriptorToField(name, value, attributes);
case CALLBACKS:
case INTERCEPTOR:
@@ -2134,7 +2133,7 @@ PropertyAttributes JSObject::GetPropertyAttribute(JSObject* receiver,
name,
continue_search);
}
- if (result->IsValid()) {
+ if (result->IsProperty()) {
switch (result->type()) {
case NORMAL: // fall through
case FIELD:
@@ -2144,13 +2143,8 @@ PropertyAttributes JSObject::GetPropertyAttribute(JSObject* receiver,
case INTERCEPTOR:
return result->holder()->
GetPropertyAttributeWithInterceptor(receiver, name, continue_search);
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
- return ABSENT;
default:
UNREACHABLE();
- break;
}
}
return ABSENT;
@@ -2323,7 +2317,7 @@ Object* JSObject::DeletePropertyPostInterceptor(String* name, DeleteMode mode) {
// Check local property, ignore interceptor.
LookupResult result;
LocalLookupRealNamedProperty(name, &result);
- if (!result.IsValid()) return Heap::true_value();
+ if (!result.IsProperty()) return Heap::true_value();
// Normalize object if needed.
Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
@@ -2507,7 +2501,7 @@ Object* JSObject::DeleteProperty(String* name, DeleteMode mode) {
} else {
LookupResult result;
LocalLookup(name, &result);
- if (!result.IsValid()) return Heap::true_value();
+ if (!result.IsProperty()) return Heap::true_value();
// Ignore attributes if forcing a deletion.
if (result.IsDontDelete() && mode != FORCE_DELETION) {
return Heap::false_value();
@@ -2742,7 +2736,7 @@ void JSObject::Lookup(String* name, LookupResult* result) {
current != Heap::null_value();
current = JSObject::cast(current)->GetPrototype()) {
JSObject::cast(current)->LocalLookup(name, result);
- if (result->IsValid() && !result->IsTransitionType()) return;
+ if (result->IsProperty()) return;
}
result->NotFound();
}
@@ -2754,7 +2748,7 @@ void JSObject::LookupCallback(String* name, LookupResult* result) {
current != Heap::null_value();
current = JSObject::cast(current)->GetPrototype()) {
JSObject::cast(current)->LocalLookupRealNamedProperty(name, result);
- if (result->IsValid() && result->type() == CALLBACKS) return;
+ if (result->IsProperty() && result->type() == CALLBACKS) return;
}
result->NotFound();
}
@@ -2784,7 +2778,7 @@ Object* JSObject::DefineGetterSetter(String* name,
// cause security problems.
LookupResult callback_result;
LookupCallback(name, &callback_result);
- if (callback_result.IsValid()) {
+ if (callback_result.IsFound()) {
Object* obj = callback_result.GetCallbackObject();
if (obj->IsAccessorInfo() &&
AccessorInfo::cast(obj)->prohibits_overwriting()) {
@@ -2835,11 +2829,16 @@ Object* JSObject::DefineGetterSetter(String* name,
// Lookup the name.
LookupResult result;
LocalLookup(name, &result);
- if (result.IsValid()) {
+ if (result.IsProperty()) {
if (result.IsReadOnly()) return Heap::undefined_value();
if (result.type() == CALLBACKS) {
Object* obj = result.GetCallbackObject();
if (obj->IsFixedArray()) {
+ // The object might be in fast mode even though it has
+ // a getter/setter.
+ Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+ if (ok->IsFailure()) return ok;
+
PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
SetNormalizedProperty(name, obj, details);
return obj;
@@ -2952,7 +2951,7 @@ Object* JSObject::LookupAccessor(String* name, bool is_getter) {
obj = JSObject::cast(obj)->GetPrototype()) {
LookupResult result;
JSObject::cast(obj)->LocalLookup(name, &result);
- if (result.IsValid()) {
+ if (result.IsProperty()) {
if (result.IsReadOnly()) return Heap::undefined_value();
if (result.type() == CALLBACKS) {
Object* obj = result.GetCallbackObject();
@@ -4820,6 +4819,40 @@ int SharedFunctionInfo::CalculateInObjectProperties() {
}
+bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
+ // Check the basic conditions for generating inline constructor code.
+ if (!FLAG_inline_new
+ || !has_only_simple_this_property_assignments()
+ || this_property_assignments_count() == 0) {
+ return false;
+ }
+
+ // If the prototype is null, inline constructors cause no problems.
+ if (!prototype->IsJSObject()) {
+ ASSERT(prototype->IsNull());
+ return true;
+ }
+
+ // Traverse the proposed prototype chain looking for setters for properties of
+ // the same names as are set by the inline constructor.
+ for (Object* obj = prototype;
+ obj != Heap::null_value();
+ obj = obj->GetPrototype()) {
+ JSObject* js_object = JSObject::cast(obj);
+ for (int i = 0; i < this_property_assignments_count(); i++) {
+ LookupResult result;
+ String* name = GetThisPropertyAssignmentName(i);
+ js_object->LocalLookupRealNamedProperty(name, &result);
+ if (result.IsProperty() && result.type() == CALLBACKS) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+
void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
bool only_simple_this_property_assignments,
FixedArray* assignments) {
@@ -4875,7 +4908,6 @@ Object* SharedFunctionInfo::GetThisPropertyAssignmentConstant(int index) {
}
-
// Support function for printing the source code to a StringStream
// without any allocation in the heap.
void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
@@ -5273,7 +5305,7 @@ static Object* ArrayLengthRangeError() {
Object* JSObject::SetElementsLength(Object* len) {
// We should never end in here with a pixel or external array.
- ASSERT(!HasPixelElements() && !HasExternalArrayElements());
+ ASSERT(AllowsSetElementsLength());
Object* smi_length = len->ToSmi();
if (smi_length->IsSmi()) {
@@ -5353,6 +5385,48 @@ Object* JSObject::SetElementsLength(Object* len) {
}
+Object* JSObject::SetPrototype(Object* value,
+ bool skip_hidden_prototypes) {
+ // Silently ignore the change if value is not a JSObject or null.
+ // SpiderMonkey behaves this way.
+ if (!value->IsJSObject() && !value->IsNull()) return value;
+
+ // Before we can set the prototype we need to be sure
+ // prototype cycles are prevented.
+ // It is sufficient to validate that the receiver is not in the new prototype
+ // chain.
+ for (Object* pt = value; pt != Heap::null_value(); pt = pt->GetPrototype()) {
+ if (JSObject::cast(pt) == this) {
+ // Cycle detected.
+ HandleScope scope;
+ return Top::Throw(*Factory::NewError("cyclic_proto",
+ HandleVector<Object>(NULL, 0)));
+ }
+ }
+
+ JSObject* real_receiver = this;
+
+ if (skip_hidden_prototypes) {
+ // Find the first object in the chain whose prototype object is not
+ // hidden and set the new prototype on that object.
+ Object* current_proto = real_receiver->GetPrototype();
+ while (current_proto->IsJSObject() &&
+ JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
+ real_receiver = JSObject::cast(current_proto);
+ current_proto = current_proto->GetPrototype();
+ }
+ }
+
+ // Set the new prototype of the object.
+ Object* new_map = real_receiver->map()->CopyDropTransitions();
+ if (new_map->IsFailure()) return new_map;
+ Map::cast(new_map)->set_prototype(value);
+ real_receiver->set_map(Map::cast(new_map));
+
+ return value;
+}
+
+
bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
@@ -6170,7 +6244,9 @@ Object* JSObject::GetPropertyPostInterceptor(JSObject* receiver,
// Check local property in holder, ignore interceptor.
LookupResult result;
LocalLookupRealNamedProperty(name, &result);
- if (result.IsValid()) return GetProperty(receiver, &result, name, attributes);
+ if (result.IsProperty()) {
+ return GetProperty(receiver, &result, name, attributes);
+ }
// Continue searching via the prototype chain.
Object* pt = GetPrototype();
*attributes = ABSENT;
@@ -6186,8 +6262,10 @@ Object* JSObject::GetLocalPropertyPostInterceptor(
// Check local property in holder, ignore interceptor.
LookupResult result;
LocalLookupRealNamedProperty(name, &result);
- if (!result.IsValid()) return Heap::undefined_value();
- return GetProperty(receiver, &result, name, attributes);
+ if (result.IsProperty()) {
+ return GetProperty(receiver, &result, name, attributes);
+ }
+ return Heap::undefined_value();
}
@@ -6239,24 +6317,7 @@ bool JSObject::HasRealNamedProperty(String* key) {
LookupResult result;
LocalLookupRealNamedProperty(key, &result);
- if (result.IsValid()) {
- switch (result.type()) {
- case NORMAL: // fall through.
- case FIELD: // fall through.
- case CALLBACKS: // fall through.
- case CONSTANT_FUNCTION:
- return true;
- case INTERCEPTOR:
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
- return false;
- default:
- UNREACHABLE();
- }
- }
-
- return false;
+ return result.IsProperty() && (result.type() != INTERCEPTOR);
}
@@ -6318,7 +6379,7 @@ bool JSObject::HasRealNamedCallbackProperty(String* key) {
LookupResult result;
LocalLookupRealNamedProperty(key, &result);
- return result.IsValid() && (result.type() == CALLBACKS);
+ return result.IsProperty() && (result.type() == CALLBACKS);
}
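
The new SetPrototype guards against __proto__ cycles by walking the proposed chain before committing, and the hidden-prototype loop re-targets the write to the first non-hidden link. A minimal sketch of just the cycle check, using a generic singly linked chain rather than V8's heap objects:

    struct Obj {
      Obj* prototype;  // null terminates the chain, like Heap::null_value().
    };

    // Returns true when setting receiver's prototype to new_proto would
    // create a cycle, i.e. the receiver already appears in the new chain.
    bool WouldCreatePrototypeCycle(const Obj* receiver, const Obj* new_proto) {
      for (const Obj* pt = new_proto; pt != nullptr; pt = pt->prototype) {
        if (pt == receiver) return true;  // Cycle; caller should throw.
      }
      return false;
    }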
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index f6411965a..3ed0a705e 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -34,6 +34,8 @@
#include "unicode-inl.h"
#if V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/constants-mips.h"
#endif
//
@@ -1101,7 +1103,6 @@ class HeapNumber: public HeapObject {
# define BIG_ENDIAN_FLOATING_POINT 1
#endif
static const int kSize = kValueOffset + kDoubleSize;
-
static const uint32_t kSignMask = 0x80000000u;
static const uint32_t kExponentMask = 0x7ff00000u;
static const uint32_t kMantissaMask = 0xfffffu;
@@ -1160,6 +1161,7 @@ class JSObject: public HeapObject {
inline bool HasExternalIntElements();
inline bool HasExternalUnsignedIntElements();
inline bool HasExternalFloatElements();
+ inline bool AllowsSetElementsLength();
inline NumberDictionary* element_dictionary(); // Gets slow elements.
// Collects elements starting at index 0.
@@ -1316,6 +1318,9 @@ class JSObject: public HeapObject {
// Return the object's prototype (might be Heap::null_value()).
inline Object* GetPrototype();
+ // Set the object's prototype (only JSObject and null are allowed).
+ Object* SetPrototype(Object* value, bool skip_hidden_prototypes);
+
// Tells whether the index'th element is present.
inline bool HasElement(uint32_t index);
bool HasElementWithReceiver(JSObject* receiver, uint32_t index);
@@ -3230,6 +3235,10 @@ class SharedFunctionInfo: public HeapObject {
inline bool try_full_codegen();
inline void set_try_full_codegen(bool flag);
+ // Check whether an inline constructor can be generated with the given
+ // prototype.
+ bool CanGenerateInlineConstructor(Object* prototype);
+
// For functions which only contains this property assignments this provides
// access to the names for the properties assigned.
DECL_ACCESSORS(this_property_assignments, Object)
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index b06d86f59..5058296db 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -1690,7 +1690,8 @@ void* Parser::ParseSourceElements(ZoneListWrapper<Statement>* processor,
// Propagate the collected information on this property assignments.
if (top_scope_->is_function_scope()) {
bool only_simple_this_property_assignments =
- this_property_assignment_finder.only_simple_this_property_assignments();
+ this_property_assignment_finder.only_simple_this_property_assignments()
+ && top_scope_->declarations()->length() == 0;
if (only_simple_this_property_assignments) {
temp_scope_->SetThisPropertyAssignmentInfo(
only_simple_this_property_assignments,
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index 005b1deb6..e890f94aa 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -89,6 +89,8 @@ uint64_t OS::CpuFeaturesImpliedByPlatform() {
// Here gcc is telling us that we are on an ARM and gcc is assuming that we
// have VFP3 instructions. If gcc can assume it then so can we.
return 1u << VFP3;
+#elif CAN_USE_ARMV7_INSTRUCTIONS
+ return 1u << ARMv7;
#else
return 0; // Linux runs on anything.
#endif
@@ -113,6 +115,9 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
case VFP3:
search_string = "vfp";
break;
+ case ARMv7:
+ search_string = "ARMv7";
+ break;
default:
UNREACHABLE();
}
@@ -151,11 +156,12 @@ int OS::ActivationFrameAlignment() {
// On EABI ARM targets this is required for fp correctness in the
// runtime system.
return 8;
-#else
+#elif V8_TARGET_ARCH_MIPS
+ return 8;
+#endif
// With gcc 4.4 the tree vectorization optimiser can generate code
// that requires 16 byte alignment such as movdqa on x86.
return 16;
-#endif
}
@@ -262,6 +268,8 @@ void OS::DebugBreak() {
// which is the architecture of generated code).
#if defined(__arm__) || defined(__thumb__)
asm("bkpt 0");
+#elif defined(__mips__)
+ asm("break");
#else
asm("int $3");
#endif
@@ -713,6 +721,7 @@ static inline bool IsVmThread() {
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
+#ifndef V8_HOST_ARCH_MIPS
USE(info);
if (signal != SIGPROF) return;
if (active_sampler_ == NULL) return;
@@ -743,6 +752,9 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample.sp = reinterpret_cast<Address>(mcontext.arm_sp);
sample.fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif
+#elif V8_HOST_ARCH_MIPS
+ // Implement this on MIPS.
+ UNIMPLEMENTED();
#endif
if (IsVmThread())
active_sampler_->SampleStack(&sample);
@@ -752,6 +764,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample.state = Logger::state();
active_sampler_->Tick(&sample);
+#endif
}
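
ArmCpuHasFeature probes for a feature by scanning /proc/cpuinfo for a token ("vfp", and now "ARMv7" as well). A hedged sketch of that style of probe, assuming a Linux host; the helper name is illustrative:

    #include <fstream>
    #include <string>

    // Returns true if /proc/cpuinfo mentions the given token anywhere,
    // e.g. CpuInfoHasToken("vfp") or CpuInfoHasToken("ARMv7").
    bool CpuInfoHasToken(const std::string& token) {
      std::ifstream cpuinfo("/proc/cpuinfo");
      std::string line;
      while (std::getline(cpuinfo, line)) {
        if (line.find(token) != std::string::npos) return true;
      }
      return false;  // File missing or token not present.
    }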
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index caa739756..b579b687b 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -33,7 +33,7 @@ namespace internal {
#ifdef DEBUG
void LookupResult::Print() {
- if (!IsValid()) {
+ if (!IsFound()) {
PrintF("Not Found\n");
return;
}
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 1869719f1..dc513484b 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -201,23 +201,17 @@ class LookupResult BASE_EMBEDDED {
}
JSObject* holder() {
- ASSERT(IsValid());
+ ASSERT(IsFound());
return holder_;
}
PropertyType type() {
- ASSERT(IsValid());
+ ASSERT(IsFound());
return details_.type();
}
- bool IsTransitionType() {
- PropertyType t = type();
- if (t == MAP_TRANSITION || t == CONSTANT_TRANSITION) return true;
- return false;
- }
-
PropertyAttributes GetAttributes() {
- ASSERT(IsValid());
+ ASSERT(IsFound());
return details_.attributes();
}
@@ -229,14 +223,17 @@ class LookupResult BASE_EMBEDDED {
bool IsDontDelete() { return details_.IsDontDelete(); }
bool IsDontEnum() { return details_.IsDontEnum(); }
bool IsDeleted() { return details_.IsDeleted(); }
+ bool IsFound() { return lookup_type_ != NOT_FOUND; }
- bool IsValid() { return lookup_type_ != NOT_FOUND; }
- bool IsNotFound() { return lookup_type_ == NOT_FOUND; }
-
- // Tells whether the result is a property.
- // Excluding transitions and the null descriptor.
+ // Is the result a property, excluding transitions and the null
+ // descriptor?
bool IsProperty() {
- return IsValid() && type() < FIRST_PHANTOM_PROPERTY_TYPE;
+ return IsFound() && (type() < FIRST_PHANTOM_PROPERTY_TYPE);
+ }
+
+ // Is the result a property or a transition?
+ bool IsPropertyOrTransition() {
+ return IsFound() && (type() != NULL_DESCRIPTOR);
}
bool IsCacheable() { return cacheable_; }
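
After this rename the lookup predicates form a strict hierarchy: IsFound() admits anything the lookup located, including map transitions and null descriptors; IsPropertyOrTransition() excludes only null descriptors; IsProperty() admits only real properties. A simplified model of the three predicates, with a stand-in enum rather than V8's PropertyType:

    #include <cassert>

    enum PropertyType {           // Simplified stand-in for V8's types:
      NORMAL, FIELD, CALLBACKS,   // real properties first,
      MAP_TRANSITION,             // then phantom types,
      NULL_DESCRIPTOR,            // with NULL_DESCRIPTOR weakest of all.
      FIRST_PHANTOM = MAP_TRANSITION
    };

    struct Lookup {
      bool found;
      PropertyType type;
      bool IsFound() const { return found; }
      bool IsProperty() const { return found && type < FIRST_PHANTOM; }
      bool IsPropertyOrTransition() const {
        return found && type != NULL_DESCRIPTOR;
      }
    };

    int main() {
      Lookup field{true, FIELD}, trans{true, MAP_TRANSITION};
      assert(field.IsFound() && field.IsProperty());
      assert(trans.IsFound() && !trans.IsProperty());  // Found but phantom.
      assert(trans.IsPropertyOrTransition());
      return 0;
    }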
diff --git a/deps/v8/src/register-allocator-inl.h b/deps/v8/src/register-allocator-inl.h
index 8fb498b7f..a99f45508 100644
--- a/deps/v8/src/register-allocator-inl.h
+++ b/deps/v8/src/register-allocator-inl.h
@@ -38,6 +38,8 @@
#include "x64/register-allocator-x64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/register-allocator-arm-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/register-allocator-mips-inl.h"
#else
#error Unsupported target architecture.
#endif
@@ -46,6 +48,20 @@
namespace v8 {
namespace internal {
+Result::Result(const Result& other) {
+ other.CopyTo(this);
+}
+
+
+Result& Result::operator=(const Result& other) {
+ if (this != &other) {
+ Unuse();
+ other.CopyTo(this);
+ }
+ return *this;
+}
+
+
Result::~Result() {
if (is_register()) {
CodeGeneratorScope::Current()->allocator()->Unuse(reg());
@@ -69,6 +85,25 @@ void Result::CopyTo(Result* destination) const {
}
+bool RegisterAllocator::is_used(Register reg) {
+ return registers_.is_used(ToNumber(reg));
+}
+
+
+int RegisterAllocator::count(Register reg) {
+ return registers_.count(ToNumber(reg));
+}
+
+
+void RegisterAllocator::Use(Register reg) {
+ registers_.Use(ToNumber(reg));
+}
+
+
+void RegisterAllocator::Unuse(Register reg) {
+ registers_.Unuse(ToNumber(reg));
+}
+
} } // namespace v8::internal
#endif // V8_REGISTER_ALLOCATOR_INL_H_
diff --git a/deps/v8/src/register-allocator.cc b/deps/v8/src/register-allocator.cc
index d55f949d8..349cc246d 100644
--- a/deps/v8/src/register-allocator.cc
+++ b/deps/v8/src/register-allocator.cc
@@ -37,10 +37,12 @@ namespace internal {
// Result implementation.
-Result::Result(Register reg) {
+Result::Result(Register reg, NumberInfo::Type info) {
ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
CodeGeneratorScope::Current()->allocator()->Use(reg);
- value_ = TypeField::encode(REGISTER) | DataField::encode(reg.code_);
+ value_ = TypeField::encode(REGISTER)
+ | NumberInfoField::encode(info)
+ | DataField::encode(reg.code_);
}
@@ -50,6 +52,23 @@ Result::ZoneObjectList* Result::ConstantList() {
}
+NumberInfo::Type Result::number_info() {
+ ASSERT(is_valid());
+ if (!is_constant()) return NumberInfoField::decode(value_);
+ Handle<Object> value = handle();
+ if (value->IsSmi()) return NumberInfo::kSmi;
+ if (value->IsHeapNumber()) return NumberInfo::kHeapNumber;
+ return NumberInfo::kUnknown;
+}
+
+
+void Result::set_number_info(NumberInfo::Type info) {
+ ASSERT(is_valid());
+ value_ = value_ & ~NumberInfoField::mask();
+ value_ = value_ | NumberInfoField::encode(info);
+}
+
+
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
diff --git a/deps/v8/src/register-allocator.h b/deps/v8/src/register-allocator.h
index 1765633cd..4ec0bb4db 100644
--- a/deps/v8/src/register-allocator.h
+++ b/deps/v8/src/register-allocator.h
@@ -29,6 +29,7 @@
#define V8_REGISTER_ALLOCATOR_H_
#include "macro-assembler.h"
+#include "number-info.h"
#if V8_TARGET_ARCH_IA32
#include "ia32/register-allocator-ia32.h"
@@ -36,6 +37,8 @@
#include "x64/register-allocator-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/register-allocator-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/register-allocator-mips.h"
#else
#error Unsupported target architecture.
#endif
@@ -62,28 +65,21 @@ class Result BASE_EMBEDDED {
Result() { invalidate(); }
// Construct a register Result.
- explicit Result(Register reg);
+ explicit Result(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
// Construct a Result whose value is a compile-time constant.
explicit Result(Handle<Object> value) {
value_ = TypeField::encode(CONSTANT)
+ | NumberInfoField::encode(NumberInfo::kUninitialized)
| DataField::encode(ConstantList()->length());
ConstantList()->Add(value);
}
// The copy constructor and assignment operators could each create a new
// register reference.
- Result(const Result& other) {
- other.CopyTo(this);
- }
+ inline Result(const Result& other);
- Result& operator=(const Result& other) {
- if (this != &other) {
- Unuse();
- other.CopyTo(this);
- }
- return *this;
- }
+ inline Result& operator=(const Result& other);
inline ~Result();
@@ -105,6 +101,14 @@ class Result BASE_EMBEDDED {
void invalidate() { value_ = TypeField::encode(INVALID); }
+ NumberInfo::Type number_info();
+ void set_number_info(NumberInfo::Type info);
+ bool is_number() {
+ return (number_info() & NumberInfo::kNumber) != 0;
+ }
+ bool is_smi() { return number_info() == NumberInfo::kSmi; }
+ bool is_heap_number() { return number_info() == NumberInfo::kHeapNumber; }
+
bool is_valid() const { return type() != INVALID; }
bool is_register() const { return type() == REGISTER; }
bool is_constant() const { return type() == CONSTANT; }
@@ -136,7 +140,8 @@ class Result BASE_EMBEDDED {
uint32_t value_;
class TypeField: public BitField<Type, 0, 2> {};
- class DataField: public BitField<uint32_t, 2, 32 - 3> {};
+ class NumberInfoField : public BitField<NumberInfo::Type, 2, 3> {};
+ class DataField: public BitField<uint32_t, 5, 32 - 6> {};
inline void CopyTo(Result* destination) const;
@@ -235,18 +240,18 @@ class RegisterAllocator BASE_EMBEDDED {
// Predicates and accessors for the registers' reference counts.
bool is_used(int num) { return registers_.is_used(num); }
- bool is_used(Register reg) { return registers_.is_used(ToNumber(reg)); }
+ inline bool is_used(Register reg);
int count(int num) { return registers_.count(num); }
- int count(Register reg) { return registers_.count(ToNumber(reg)); }
+ inline int count(Register reg);
// Explicitly record a reference to a register.
void Use(int num) { registers_.Use(num); }
- void Use(Register reg) { registers_.Use(ToNumber(reg)); }
+ inline void Use(Register reg);
// Explicitly record that a register will no longer be used.
void Unuse(int num) { registers_.Unuse(num); }
- void Unuse(Register reg) { registers_.Unuse(ToNumber(reg)); }
+ inline void Unuse(Register reg);
// Reset the register reference counts to free all non-reserved registers.
void Reset() { registers_.Reset(); }
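
With NumberInfoField wedged between them, a Result now packs three fields into one 32-bit word: 2 bits of type, 3 bits of number info, and 27 bits of payload (register code or constant index). A minimal reimplementation of the BitField pattern, to show the encode/decode arithmetic; this is a sketch, not V8's header:

    #include <cassert>
    #include <cstdint>

    // Minimal version of V8's BitField: a typed view of <size> bits
    // starting at <shift> inside a uint32_t.
    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> shift);
      }
    };

    using TypeField = BitField<uint32_t, 0, 2>;        // REGISTER/CONSTANT/...
    using NumberInfoField = BitField<uint32_t, 2, 3>;  // NumberInfo::Type
    using DataField = BitField<uint32_t, 5, 27>;       // reg code / index

    int main() {
      uint32_t word = TypeField::encode(1) | NumberInfoField::encode(3) |
                      DataField::encode(42);
      assert(TypeField::decode(word) == 1);
      assert(NumberInfoField::decode(word) == 3);  // e.g. NumberInfo::kSmi
      assert(DataField::decode(word) == 42);
      return 0;
    }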
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 515343b7b..38e332b24 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -596,8 +596,9 @@ static Object* Runtime_GetOwnProperty(Arguments args) {
if (result.type() == CALLBACKS) {
Object* structure = result.GetCallbackObject();
- if (structure->IsProxy()) {
- // Property that is internally implemented as a callback.
+ if (structure->IsProxy() || structure->IsAccessorInfo()) {
+ // Property that is internally implemented as a callback or
+ // an API defined callback.
Object* value = obj->GetPropertyWithCallback(
obj, structure, name, result.holder());
elms->set(0, Heap::false_value());
@@ -609,7 +610,6 @@ static Object* Runtime_GetOwnProperty(Arguments args) {
elms->set(1, FixedArray::cast(structure)->get(0));
elms->set(2, FixedArray::cast(structure)->get(1));
} else {
- // TODO(ricow): Handle API callbacks.
return Heap::undefined_value();
}
} else {
@@ -619,7 +619,7 @@ static Object* Runtime_GetOwnProperty(Arguments args) {
}
elms->set(3, Heap::ToBoolean(!result.IsDontEnum()));
- elms->set(4, Heap::ToBoolean(!result.IsReadOnly()));
+ elms->set(4, Heap::ToBoolean(!result.IsDontDelete()));
return *desc;
}
@@ -1208,17 +1208,6 @@ static Object* Runtime_OptimizeObjectForAddingMultipleProperties(
}
-static Object* Runtime_TransformToFastProperties(Arguments args) {
- HandleScope scope;
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- if (!object->HasFastProperties() && !object->IsGlobalObject()) {
- TransformToFastProperties(object, 0);
- }
- return *object;
-}
-
-
static Object* Runtime_RegExpExec(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 4);
@@ -2888,6 +2877,67 @@ static Object* Runtime_KeyedGetProperty(Arguments args) {
}
+static Object* Runtime_DefineOrRedefineAccessorProperty(Arguments args) {
+ ASSERT(args.length() == 5);
+ HandleScope scope;
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+ CONVERT_CHECKED(String, name, args[1]);
+ CONVERT_CHECKED(Smi, flag_setter, args[2]);
+ CONVERT_CHECKED(JSFunction, fun, args[3]);
+ CONVERT_CHECKED(Smi, flag_attr, args[4]);
+ int unchecked = flag_attr->value();
+ RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+ RUNTIME_ASSERT(!obj->IsNull());
+ LookupResult result;
+ obj->LocalLookupRealNamedProperty(name, &result);
+
+ PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
+ // If an existing property is either FIELD, NORMAL or CONSTANT_FUNCTION,
+ // delete it to avoid running into trouble in DefineAccessor, which
+ // handles this incorrectly if the property is read-only (it does nothing).
+ if (result.IsProperty() &&
+ (result.type() == FIELD || result.type() == NORMAL
+ || result.type() == CONSTANT_FUNCTION)) {
+ obj->DeleteProperty(name, JSObject::NORMAL_DELETION);
+ }
+ return obj->DefineAccessor(name, flag_setter->value() == 0, fun, attr);
+}
+
+static Object* Runtime_DefineOrRedefineDataProperty(Arguments args) {
+ ASSERT(args.length() == 4);
+ HandleScope scope;
+ CONVERT_ARG_CHECKED(JSObject, js_object, 0);
+ CONVERT_ARG_CHECKED(String, name, 1);
+ Handle<Object> obj_value = args.at<Object>(2);
+
+ CONVERT_CHECKED(Smi, flag, args[3]);
+ int unchecked = flag->value();
+ RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+
+ LookupResult result;
+ js_object->LocalLookupRealNamedProperty(*name, &result);
+
+ PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
+
+ // Take special care when attributes are different and there is already
+ // a property. For simplicity we normalize the property, which means we
+ // need not worry about changing the instance_descriptor and creating a new
+ // map. The current version of SetObjectProperty does not handle attributes
+ // correctly in the case where a property is a field and is reset with
+ // new attributes.
+ if (result.IsProperty() && attr != result.GetAttributes()) {
+ // New attributes - normalize to avoid writing to instance descriptor
+ js_object->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+ // Use IgnoreAttributes version since a readonly property may be
+ // overridden and SetProperty does not allow this.
+ return js_object->IgnoreAttributesAndSetLocalProperty(*name,
+ *obj_value,
+ attr);
+ }
+ return Runtime::SetObjectProperty(js_object, name, obj_value, attr);
+}
+
+
Object* Runtime::SetObjectProperty(Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
@@ -2910,8 +2960,6 @@ Object* Runtime::SetObjectProperty(Handle<Object> object,
// Check if the given key is an array index.
uint32_t index;
if (Array::IndexFromObject(*key, &index)) {
- ASSERT(attr == NONE);
-
// In Firefox/SpiderMonkey, Safari and Opera you can access the characters
// of a string using [] notation. We need to support this too in
// JavaScript.
@@ -2931,7 +2979,6 @@ Object* Runtime::SetObjectProperty(Handle<Object> object,
if (key->IsString()) {
Handle<Object> result;
if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
- ASSERT(attr == NONE);
result = SetElement(js_object, index, value);
} else {
Handle<String> key_string = Handle<String>::cast(key);
@@ -2949,7 +2996,6 @@ Object* Runtime::SetObjectProperty(Handle<Object> object,
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
- ASSERT(attr == NONE);
return js_object->SetElement(index, *value);
} else {
return js_object->SetProperty(*name, *value, attr);
@@ -2966,8 +3012,6 @@ Object* Runtime::ForceSetObjectProperty(Handle<JSObject> js_object,
// Check if the given key is an array index.
uint32_t index;
if (Array::IndexFromObject(*key, &index)) {
- ASSERT(attr == NONE);
-
// In Firefox/SpiderMonkey, Safari and Opera you can access the characters
// of a string using [] notation. We need to support this too in
// JavaScript.
@@ -2984,7 +3028,6 @@ Object* Runtime::ForceSetObjectProperty(Handle<JSObject> js_object,
if (key->IsString()) {
if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
- ASSERT(attr == NONE);
return js_object->SetElement(index, *value);
} else {
Handle<String> key_string = Handle<String>::cast(key);
@@ -3002,7 +3045,6 @@ Object* Runtime::ForceSetObjectProperty(Handle<JSObject> js_object,
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
- ASSERT(attr == NONE);
return js_object->SetElement(index, *value);
} else {
return js_object->IgnoreAttributesAndSetLocalProperty(*name, *value, attr);
@@ -3461,17 +3503,23 @@ static Object* Runtime_GetArgumentsProperty(Arguments args) {
static Object* Runtime_ToFastProperties(Arguments args) {
+ HandleScope scope;
+
ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
if (object->IsJSObject()) {
Handle<JSObject> js_object = Handle<JSObject>::cast(object);
- js_object->TransformToFastProperties(0);
+ if (!js_object->HasFastProperties() && !js_object->IsGlobalObject()) {
+ js_object->TransformToFastProperties(0);
+ }
}
return *object;
}
static Object* Runtime_ToSlowProperties(Arguments args) {
+ HandleScope scope;
+
ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
if (object->IsJSObject()) {
@@ -4709,41 +4757,6 @@ static Object* Runtime_Math_tan(Arguments args) {
}
-// The NewArguments function is only used when constructing the
-// arguments array when calling non-functions from JavaScript in
-// runtime.js:CALL_NON_FUNCTION.
-static Object* Runtime_NewArguments(Arguments args) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- // ECMA-262, 3rd., 10.1.8, p.39
- CONVERT_CHECKED(JSFunction, callee, args[0]);
-
- // Compute the frame holding the arguments.
- JavaScriptFrameIterator it;
- it.AdvanceToArgumentsFrame();
- JavaScriptFrame* frame = it.frame();
-
- const int length = frame->GetProvidedParametersCount();
- Object* result = Heap::AllocateArgumentsObject(callee, length);
- if (result->IsFailure()) return result;
- if (length > 0) {
- Object* obj = Heap::AllocateFixedArray(length);
- if (obj->IsFailure()) return obj;
- FixedArray* array = FixedArray::cast(obj);
- ASSERT(array->length() == length);
-
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < length; i++) {
- array->set(i, frame->GetParameter(i), mode);
- }
- JSObject::cast(result)->set_elements(array);
- }
- return result;
-}
-
-
static Object* Runtime_NewArgumentsFast(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
@@ -4790,21 +4803,21 @@ static Object* Runtime_NewClosure(Arguments args) {
}
-static Code* ComputeConstructStub(Handle<SharedFunctionInfo> shared) {
- // TODO(385): Change this to create a construct stub specialized for
- // the given map to make allocation of simple objects - and maybe
- // arrays - much faster.
- if (FLAG_inline_new
- && shared->has_only_simple_this_property_assignments()) {
+static Code* ComputeConstructStub(Handle<JSFunction> function) {
+ Handle<Object> prototype = Factory::null_value();
+ if (function->has_instance_prototype()) {
+ prototype = Handle<Object>(function->instance_prototype());
+ }
+ if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
ConstructStubCompiler compiler;
- Object* code = compiler.CompileConstructStub(*shared);
+ Object* code = compiler.CompileConstructStub(function->shared());
if (code->IsFailure()) {
return Builtins::builtin(Builtins::JSConstructStubGeneric);
}
return Code::cast(code);
}
- return shared->construct_stub();
+ return function->shared()->construct_stub();
}
@@ -4854,10 +4867,9 @@ static Object* Runtime_NewObject(Arguments args) {
bool first_allocation = !function->has_initial_map();
Handle<JSObject> result = Factory::NewJSObject(function);
if (first_allocation) {
- Handle<Map> map = Handle<Map>(function->initial_map());
Handle<Code> stub = Handle<Code>(
- ComputeConstructStub(Handle<SharedFunctionInfo>(function->shared())));
- function->shared()->set_construct_stub(*stub);
+ ComputeConstructStub(Handle<JSFunction>(function)));
+ shared->set_construct_stub(*stub);
}
Counters::constructed_objects.Increment();
@@ -4896,28 +4908,6 @@ static Object* Runtime_LazyCompile(Arguments args) {
}
-static Object* Runtime_GetCalledFunction(Arguments args) {
- HandleScope scope;
- ASSERT(args.length() == 0);
- StackFrameIterator it;
- // Get past the JS-to-C exit frame.
- ASSERT(it.frame()->is_exit());
- it.Advance();
- // Get past the CALL_NON_FUNCTION activation frame.
- ASSERT(it.frame()->is_java_script());
- it.Advance();
- // Argument adaptor frames do not copy the function; we have to skip
- // past them to get to the real calling frame.
- if (it.frame()->is_arguments_adaptor()) it.Advance();
- // Get the function from the top of the expression stack of the
- // calling frame.
- StandardFrame* frame = StandardFrame::cast(it.frame());
- int index = frame->ComputeExpressionsCount() - 1;
- Object* result = frame->GetExpression(index);
- return result;
-}
-
-
static Object* Runtime_GetFunctionDelegate(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 1);
@@ -7922,20 +7912,22 @@ static Object* Runtime_FunctionGetInferredName(Arguments args) {
static Object* Runtime_ProfilerResume(Arguments args) {
NoHandleAllocation ha;
- ASSERT(args.length() == 1);
+ ASSERT(args.length() == 2);
CONVERT_CHECKED(Smi, smi_modules, args[0]);
- v8::V8::ResumeProfilerEx(smi_modules->value());
+ CONVERT_CHECKED(Smi, smi_tag, args[1]);
+ v8::V8::ResumeProfilerEx(smi_modules->value(), smi_tag->value());
return Heap::undefined_value();
}
static Object* Runtime_ProfilerPause(Arguments args) {
NoHandleAllocation ha;
- ASSERT(args.length() == 1);
+ ASSERT(args.length() == 2);
CONVERT_CHECKED(Smi, smi_modules, args[0]);
- v8::V8::PauseProfilerEx(smi_modules->value());
+ CONVERT_CHECKED(Smi, smi_tag, args[1]);
+ v8::V8::PauseProfilerEx(smi_modules->value(), smi_tag->value());
return Heap::undefined_value();
}
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index b2b8609e7..e2e5c2212 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -71,10 +71,8 @@ namespace internal {
F(IsExtensible, 1, 1) \
\
/* Utilities */ \
- F(GetCalledFunction, 0, 1) \
F(GetFunctionDelegate, 1, 1) \
F(GetConstructorDelegate, 1, 1) \
- F(NewArguments, 1, 1) \
F(NewArgumentsFast, 3, 1) \
F(LazyCompile, 1, 1) \
F(SetNewFunctionAttributes, 1, 1) \
@@ -215,6 +213,8 @@ namespace internal {
F(ResolvePossiblyDirectEval, 3, 2) \
\
F(SetProperty, -1 /* 3 or 4 */, 1) \
+ F(DefineOrRedefineDataProperty, 4, 1) \
+ F(DefineOrRedefineAccessorProperty, 5, 1) \
F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
\
/* Arrays */ \
@@ -266,7 +266,6 @@ namespace internal {
F(InitializeConstGlobal, 2, 1) \
F(InitializeConstContextSlot, 3, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
- F(TransformToFastProperties, 1, 1) \
\
/* Debugging */ \
F(DebugPrint, 1, 1) \
@@ -329,8 +328,8 @@ namespace internal {
#ifdef ENABLE_LOGGING_AND_PROFILING
#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F) \
- F(ProfilerResume, 1, 1) \
- F(ProfilerPause, 1, 1)
+ F(ProfilerResume, 2, 1) \
+ F(ProfilerPause, 2, 1)
#else
#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F)
#endif
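
The runtime function list above is an X-macro: each entry F(Name, argc, result_size) appears once and is expanded several ways (enum ids, declarations, a name table). A hedged miniature of the technique with made-up entries, not V8's actual expansion sites:

    #include <cstdio>

    // One row per runtime function: F(Name, number_of_args, result_size).
    #define RUNTIME_LIST(F) \
      F(SetProperty, 4, 1)  \
      F(ToFastProperties, 1, 1)

    // Expansion 1: an enum of function ids.
    #define DECLARE_ID(Name, argc, size) kRuntime##Name,
    enum RuntimeId { RUNTIME_LIST(DECLARE_ID) kNumRuntimeFunctions };
    #undef DECLARE_ID

    // Expansion 2: a table of names and arities built from the same list.
    struct Entry { const char* name; int argc; };
    #define DECLARE_ENTRY(Name, argc, size) {#Name, argc},
    static const Entry kEntries[] = { RUNTIME_LIST(DECLARE_ENTRY) };
    #undef DECLARE_ENTRY

    int main() {
      for (const Entry& e : kEntries) std::printf("%s/%d\n", e.name, e.argc);
      return 0;
    }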
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index c4c855eb9..231763cbc 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -178,7 +178,7 @@ function STRING_ADD_LEFT(y) {
y = %_ValueOf(y);
} else {
y = IS_NUMBER(y)
- ? %NumberToString(y)
+ ? %_NumberToString(y)
: %ToString(%ToPrimitive(y, NO_HINT));
}
}
@@ -194,7 +194,7 @@ function STRING_ADD_RIGHT(y) {
x = %_ValueOf(x);
} else {
x = IS_NUMBER(x)
- ? %NumberToString(x)
+ ? %_NumberToString(x)
: %ToString(%ToPrimitive(x, NO_HINT));
}
}
@@ -395,26 +395,20 @@ function FILTER_KEY(key) {
function CALL_NON_FUNCTION() {
- var callee = %GetCalledFunction();
- var delegate = %GetFunctionDelegate(callee);
+ var delegate = %GetFunctionDelegate(this);
if (!IS_FUNCTION(delegate)) {
- throw %MakeTypeError('called_non_callable', [typeof callee]);
+ throw %MakeTypeError('called_non_callable', [typeof this]);
}
-
- var parameters = %NewArguments(delegate);
- return delegate.apply(callee, parameters);
+ return delegate.apply(this, arguments);
}
function CALL_NON_FUNCTION_AS_CONSTRUCTOR() {
- var callee = %GetCalledFunction();
- var delegate = %GetConstructorDelegate(callee);
+ var delegate = %GetConstructorDelegate(this);
if (!IS_FUNCTION(delegate)) {
- throw %MakeTypeError('called_non_callable', [typeof callee]);
+ throw %MakeTypeError('called_non_callable', [typeof this]);
}
-
- var parameters = %NewArguments(delegate);
- return delegate.apply(callee, parameters);
+ return delegate.apply(this, arguments);
}
@@ -506,6 +500,16 @@ function ToPrimitive(x, hint) {
}
+// ECMA-262, section 9.2, page 30
+function ToBoolean(x) {
+ if (IS_BOOLEAN(x)) return x;
+ if (IS_STRING(x)) return x.length != 0;
+ if (x == null) return false;
+ if (IS_NUMBER(x)) return !((x == 0) || NUMBER_IS_NAN(x));
+ return true;
+}
+
+
// ECMA-262, section 9.3, page 31.
function ToNumber(x) {
if (IS_NUMBER(x)) return x;
@@ -519,23 +523,13 @@ function ToNumber(x) {
// ECMA-262, section 9.8, page 35.
function ToString(x) {
if (IS_STRING(x)) return x;
- if (IS_NUMBER(x)) return %NumberToString(x);
+ if (IS_NUMBER(x)) return %_NumberToString(x);
if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
if (IS_UNDEFINED(x)) return 'undefined';
return (IS_NULL(x)) ? 'null' : %ToString(%DefaultString(x));
}
-// ... where did this come from?
-function ToBoolean(x) {
- if (IS_BOOLEAN(x)) return x;
- if (IS_STRING(x)) return x.length != 0;
- if (x == null) return false;
- if (IS_NUMBER(x)) return !((x == 0) || NUMBER_IS_NAN(x));
- return true;
-}
-
-
// ECMA-262, section 9.9, page 36.
function ToObject(x) {
if (IS_STRING(x)) return new $String(x);
@@ -569,6 +563,25 @@ function ToInt32(x) {
}
+// ES5, section 9.12
+function SameValue(x, y) {
+ if (typeof x != typeof y) return false;
+ if (IS_NULL_OR_UNDEFINED(x)) return true;
+ if (IS_NUMBER(x)) {
+ if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
+ // x is +0 and y is -0 or vice versa
+ if (x === 0 && y === 0 && !%_IsSmi(x) && !%_IsSmi(y) &&
+ ((1 / x < 0 && 1 / y > 0) || (1 / x > 0 && 1 / y < 0))) {
+ return false;
+ }
+ return x == y;
+ }
+ if (IS_STRING(x)) return %StringEquals(x, y);
+ if (IS_BOOLEAN(x)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
+
+ return %_ObjectEquals(x, y);
+}
+
/* ---------------------------------
- - - U t i l i t i e s - - -
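
SameValue departs from == at exactly two IEEE-754 corner cases: NaN equals itself, and +0 differs from -0 (probed above through the sign of 1/x, since %_IsSmi filters out integer zeros). A C++ equivalent restricted to doubles, as a sketch:

    #include <cassert>
    #include <cmath>

    // ES5 9.12 SameValue for doubles: NaN equals NaN, but +0 != -0.
    bool SameValueDouble(double x, double y) {
      if (std::isnan(x) && std::isnan(y)) return true;
      if (x == 0.0 && y == 0.0) return std::signbit(x) == std::signbit(y);
      return x == y;
    }

    int main() {
      assert(SameValueDouble(NAN, NAN));    // Unlike ==.
      assert(!SameValueDouble(0.0, -0.0));  // Unlike ==.
      assert(SameValueDouble(1.5, 1.5));
      return 0;
    }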
diff --git a/deps/v8/src/simulator.h b/deps/v8/src/simulator.h
index 6f8cd5a28..485e93064 100644
--- a/deps/v8/src/simulator.h
+++ b/deps/v8/src/simulator.h
@@ -34,6 +34,8 @@
#include "x64/simulator-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/simulator-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/simulator-mips.h"
#else
#error Unsupported target architecture.
#endif
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index ed938ecfa..ba01ed67e 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -87,14 +87,18 @@ function StringCharCodeAt(pos) {
// ECMA-262, section 15.5.4.6
function StringConcat() {
- var len = %_ArgumentsLength() + 1;
- var parts = new $Array(len);
- parts[0] = IS_STRING(this) ? this : ToString(this);
- for (var i = 1; i < len; i++) {
- var part = %_Arguments(i - 1);
- parts[i] = IS_STRING(part) ? part : ToString(part);
+ var len = %_ArgumentsLength();
+ var this_as_string = IS_STRING(this) ? this : ToString(this);
+ if (len === 1) {
+ return this_as_string + %_Arguments(0);
}
- return %StringBuilderConcat(parts, len, "");
+ var parts = new $Array(len + 1);
+ parts[0] = this_as_string;
+ for (var i = 0; i < len; i++) {
+ var part = %_Arguments(i);
+ parts[i + 1] = IS_STRING(part) ? part : ToString(part);
+ }
+ return %StringBuilderConcat(parts, len + 1, "");
}
// Match ES3 and Safari
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 81f89fd4b..3adaa40c1 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -484,7 +484,10 @@ Object* StubCache::ComputeCallField(int argc,
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(argc, in_loop);
- code = compiler.CompileCallField(object, holder, index, name);
+ code = compiler.CompileCallField(JSObject::cast(object),
+ holder,
+ index,
+ name);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
@@ -518,7 +521,9 @@ Object* StubCache::ComputeCallInterceptor(int argc,
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(argc, NOT_IN_LOOP);
- code = compiler.CompileCallInterceptor(object, holder, name);
+ code = compiler.CompileCallInterceptor(JSObject::cast(object),
+ holder,
+ name);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
@@ -920,6 +925,13 @@ Object* StoreInterceptorProperty(Arguments args) {
}
+Object* KeyedLoadPropertyWithInterceptor(Arguments args) {
+ JSObject* receiver = JSObject::cast(args[0]);
+ uint32_t index = Smi::cast(args[1])->value();
+ return receiver->GetElementWithInterceptor(receiver, index);
+}
+
+
Object* StubCompiler::CompileCallInitialize(Code::Flags flags) {
HandleScope scope;
int argc = Code::ExtractArgumentsCountFromFlags(flags);
@@ -1058,11 +1070,13 @@ Object* StubCompiler::GetCodeWithFlags(Code::Flags flags, String* name) {
return GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
}
+
void StubCompiler::LookupPostInterceptor(JSObject* holder,
String* name,
LookupResult* lookup) {
holder->LocalLookupRealNamedProperty(name, lookup);
- if (lookup->IsNotFound()) {
+ if (!lookup->IsProperty()) {
+ lookup->NotFound();
Object* proto = holder->GetPrototype();
if (proto != Heap::null_value()) {
proto->Lookup(name, lookup);
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index d97fe7732..43354db10 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -312,6 +312,7 @@ Object* LoadPropertyWithInterceptorForLoad(Arguments args);
Object* LoadPropertyWithInterceptorForCall(Arguments args);
Object* StoreInterceptorProperty(Arguments args);
Object* CallInterceptorProperty(Arguments args);
+Object* KeyedLoadPropertyWithInterceptor(Arguments args);
// Support function for computing call IC miss stubs.
@@ -346,6 +347,7 @@ class StubCompiler BASE_EMBEDDED {
static void GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype);
+
static void GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst, Register src,
JSObject* holder, int index);
@@ -354,22 +356,20 @@ class StubCompiler BASE_EMBEDDED {
Register receiver,
Register scratch,
Label* miss_label);
+
static void GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
Label* miss_label);
- static void GenerateLoadStringLength2(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label);
+
static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss_label);
+
static void GenerateStoreField(MacroAssembler* masm,
- Builtins::Name storage_extend,
JSObject* object,
int index,
Map* transition,
@@ -377,16 +377,30 @@ class StubCompiler BASE_EMBEDDED {
Register name_reg,
Register scratch,
Label* miss_label);
+
static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind);
// Check the integrity of the prototype chain to make sure that the
// current IC is still valid.
+
+ Register CheckPrototypes(JSObject* object,
+ Register object_reg,
+ JSObject* holder,
+ Register holder_reg,
+ Register scratch,
+ String* name,
+ Label* miss) {
+ return CheckPrototypes(object, object_reg, holder, holder_reg, scratch,
+ name, kInvalidProtoDepth, miss);
+ }
+
Register CheckPrototypes(JSObject* object,
Register object_reg,
JSObject* holder,
Register holder_reg,
Register scratch,
String* name,
+ int save_at_depth,
Label* miss);
protected:
@@ -538,7 +552,7 @@ class CallStubCompiler: public StubCompiler {
explicit CallStubCompiler(int argc, InLoopFlag in_loop)
: arguments_(argc), in_loop_(in_loop) { }
- Object* CompileCallField(Object* object,
+ Object* CompileCallField(JSObject* object,
JSObject* holder,
int index,
String* name);
@@ -547,7 +561,7 @@ class CallStubCompiler: public StubCompiler {
JSFunction* function,
String* name,
CheckType check);
- Object* CompileCallInterceptor(Object* object,
+ Object* CompileCallInterceptor(JSObject* object,
JSObject* holder,
String* name);
Object* CompileCallGlobal(JSObject* object,
diff --git a/deps/v8/src/top.cc b/deps/v8/src/top.cc
index 027483858..b9db4be52 100644
--- a/deps/v8/src/top.cc
+++ b/deps/v8/src/top.cc
@@ -949,10 +949,15 @@ Handle<Context> Top::GetCallingGlobalContext() {
}
+bool Top::CanHaveSpecialFunctions(JSObject* object) {
+ return object->IsJSArray();
+}
+
+
Object* Top::LookupSpecialFunction(JSObject* receiver,
JSObject* prototype,
JSFunction* function) {
- if (receiver->IsJSArray()) {
+ if (CanHaveSpecialFunctions(receiver)) {
FixedArray* table = context()->global_context()->special_function_table();
for (int index = 0; index < table->length(); index +=3) {
if ((prototype == table->get(index)) &&
diff --git a/deps/v8/src/top.h b/deps/v8/src/top.h
index 8780844b0..ddc73ba55 100644
--- a/deps/v8/src/top.h
+++ b/deps/v8/src/top.h
@@ -342,6 +342,7 @@ class Top {
return Handle<JSBuiltinsObject>(thread_local_.context_->builtins());
}
+ static bool CanHaveSpecialFunctions(JSObject* object);
static Object* LookupSpecialFunction(JSObject* receiver,
JSObject* prototype,
JSFunction* value);
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index 374385b67..45a4cd60f 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -51,43 +51,6 @@ uint32_t RoundUpToPowerOf2(uint32_t x) {
}
-byte* EncodeInt(byte* p, int x) {
- while (x < -64 || x >= 64) {
- *p++ = static_cast<byte>(x & 127);
- x = ArithmeticShiftRight(x, 7);
- }
- // -64 <= x && x < 64
- *p++ = static_cast<byte>(x + 192);
- return p;
-}
-
-
-byte* DecodeInt(byte* p, int* x) {
- int r = 0;
- unsigned int s = 0;
- byte b = *p++;
- while (b < 128) {
- r |= static_cast<int>(b) << s;
- s += 7;
- b = *p++;
- }
- // b >= 128
- *x = r | ((static_cast<int>(b) - 192) << s);
- return p;
-}
-
-
-byte* EncodeUnsignedIntBackward(byte* p, unsigned int x) {
- while (x >= 128) {
- *--p = static_cast<byte>(x & 127);
- x = x >> 7;
- }
- // x < 128
- *--p = static_cast<byte>(x + 128);
- return p;
-}
-
-
// Thomas Wang, Integer Hash Functions.
// http://www.concentric.net/~Ttwang/tech/inthash.htm
uint32_t ComputeIntegerHash(uint32_t key) {
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 0fd24ec9a..c59ca258a 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -174,48 +174,6 @@ class BitField {
// ----------------------------------------------------------------------------
-// Support for compressed, machine-independent encoding
-// and decoding of integer values of arbitrary size.
-
-// Encoding and decoding from/to a buffer at position p;
-// the result is the position after the encoded integer.
-// Small signed integers in the range -64 <= x && x < 64
-// are encoded in 1 byte; larger values are encoded in 2
-// or more bytes. At most sizeof(int) + 1 bytes are used
-// in the worst case.
-byte* EncodeInt(byte* p, int x);
-byte* DecodeInt(byte* p, int* x);
-
-
-// Encoding and decoding from/to a buffer at position p - 1
-// moving backward; the result is the position of the last
-// byte written. These routines are useful to read/write
-// into a buffer starting at the end of the buffer.
-byte* EncodeUnsignedIntBackward(byte* p, unsigned int x);
-
-// The decoding function is inlined since its performance is
-// important to mark-sweep garbage collection.
-inline byte* DecodeUnsignedIntBackward(byte* p, unsigned int* x) {
- byte b = *--p;
- if (b >= 128) {
- *x = static_cast<unsigned int>(b) - 128;
- return p;
- }
- unsigned int r = static_cast<unsigned int>(b);
- unsigned int s = 7;
- b = *--p;
- while (b < 128) {
- r |= static_cast<unsigned int>(b) << s;
- s += 7;
- b = *--p;
- }
- // b >= 128
- *x = r | ((static_cast<unsigned int>(b) - 128) << s);
- return p;
-}
-
-
-// ----------------------------------------------------------------------------
// Hash function.
uint32_t ComputeIntegerHash(uint32_t key);
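
The deleted helpers implemented a little-endian base-128 scheme: continuation bytes keep the top bit clear, and a terminator byte in the range 128..255 carries the final signed chunk plus 192. A minimal JavaScript sketch of the same round trip, mirroring the removed EncodeInt/DecodeInt:

    function encodeInt(x) {
      var bytes = [];
      while (x < -64 || x >= 64) {
        bytes.push(x & 127);  // low 7 bits, top bit clear: more to come
        x >>= 7;              // arithmetic shift, like ArithmeticShiftRight
      }
      bytes.push(x + 192);    // -64..63 maps to 128..255: terminator
      return bytes;
    }

    function decodeInt(bytes) {
      var r = 0, s = 0, i = 0, b = bytes[i++];
      while (b < 128) { r |= b << s; s += 7; b = bytes[i++]; }
      return r | ((b - 192) << s);  // sign is recovered from the terminator
    }

    // decodeInt(encodeInt(-1000)) === -1000
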
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index 7397c3044..eaac2dbbb 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -100,70 +100,76 @@ namespace internal {
SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize)
-#define STATS_COUNTER_LIST_2(SC) \
- /* Number of code stubs. */ \
- SC(code_stubs, V8.CodeStubs) \
- /* Amount of stub code. */ \
- SC(total_stubs_code_size, V8.TotalStubsCodeSize) \
- /* Amount of (JS) compiled code. */ \
- SC(total_compiled_code_size, V8.TotalCompiledCodeSize) \
- SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest) \
- SC(gc_compactor_caused_by_promoted_data, \
- V8.GCCompactorCausedByPromotedData) \
- SC(gc_compactor_caused_by_oldspace_exhaustion, \
- V8.GCCompactorCausedByOldspaceExhaustion) \
- SC(gc_compactor_caused_by_weak_handles, \
- V8.GCCompactorCausedByWeakHandles) \
- SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \
- SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
- /* How is the generic keyed-load stub used? */ \
- SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
- SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
- SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \
- SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
- /* Count how much the monomorphic keyed-load stubs are hit. */ \
- SC(keyed_load_function_prototype, V8.KeyedLoadFunctionPrototype) \
- SC(keyed_load_string_length, V8.KeyedLoadStringLength) \
- SC(keyed_load_array_length, V8.KeyedLoadArrayLength) \
- SC(keyed_load_constant_function, V8.KeyedLoadConstantFunction) \
- SC(keyed_load_field, V8.KeyedLoadField) \
- SC(keyed_load_callback, V8.KeyedLoadCallback) \
- SC(keyed_load_interceptor, V8.KeyedLoadInterceptor) \
- SC(keyed_load_inline, V8.KeyedLoadInline) \
- SC(keyed_load_inline_miss, V8.KeyedLoadInlineMiss) \
- SC(named_load_inline, V8.NamedLoadInline) \
- SC(named_load_inline_miss, V8.NamedLoadInlineMiss) \
- SC(named_load_global_inline, V8.NamedLoadGlobalInline) \
- SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss) \
- SC(keyed_store_field, V8.KeyedStoreField) \
- SC(keyed_store_inline, V8.KeyedStoreInline) \
- SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
- SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
- SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
- SC(call_global_inline, V8.CallGlobalInline) \
- SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \
- SC(constructed_objects, V8.ConstructedObjects) \
- SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
- SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
- SC(array_function_runtime, V8.ArrayFunctionRuntime) \
- SC(array_function_native, V8.ArrayFunctionNative) \
- SC(for_in, V8.ForIn) \
- SC(enum_cache_hits, V8.EnumCacheHits) \
- SC(enum_cache_misses, V8.EnumCacheMisses) \
- SC(reloc_info_count, V8.RelocInfoCount) \
- SC(reloc_info_size, V8.RelocInfoSize) \
- SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
- SC(compute_entry_frame, V8.ComputeEntryFrame) \
- SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
- SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs) \
- SC(string_add_runtime, V8.StringAddRuntime) \
- SC(string_add_native, V8.StringAddNative) \
- SC(sub_string_runtime, V8.SubStringRuntime) \
- SC(sub_string_native, V8.SubStringNative) \
- SC(string_compare_native, V8.StringCompareNative) \
- SC(string_compare_runtime, V8.StringCompareRuntime) \
- SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
- SC(regexp_entry_native, V8.RegExpEntryNative)
+#define STATS_COUNTER_LIST_2(SC) \
+ /* Number of code stubs. */ \
+ SC(code_stubs, V8.CodeStubs) \
+ /* Amount of stub code. */ \
+ SC(total_stubs_code_size, V8.TotalStubsCodeSize) \
+ /* Amount of (JS) compiled code. */ \
+ SC(total_compiled_code_size, V8.TotalCompiledCodeSize) \
+ SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest) \
+ SC(gc_compactor_caused_by_promoted_data, \
+ V8.GCCompactorCausedByPromotedData) \
+ SC(gc_compactor_caused_by_oldspace_exhaustion, \
+ V8.GCCompactorCausedByOldspaceExhaustion) \
+ SC(gc_compactor_caused_by_weak_handles, \
+ V8.GCCompactorCausedByWeakHandles) \
+ SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \
+ SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
+ /* How is the generic keyed-load stub used? */ \
+ SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
+ SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
+ SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \
+ SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
+ /* Count how much the monomorphic keyed-load stubs are hit. */ \
+ SC(keyed_load_function_prototype, V8.KeyedLoadFunctionPrototype) \
+ SC(keyed_load_string_length, V8.KeyedLoadStringLength) \
+ SC(keyed_load_array_length, V8.KeyedLoadArrayLength) \
+ SC(keyed_load_constant_function, V8.KeyedLoadConstantFunction) \
+ SC(keyed_load_field, V8.KeyedLoadField) \
+ SC(keyed_load_callback, V8.KeyedLoadCallback) \
+ SC(keyed_load_interceptor, V8.KeyedLoadInterceptor) \
+ SC(keyed_load_inline, V8.KeyedLoadInline) \
+ SC(keyed_load_inline_miss, V8.KeyedLoadInlineMiss) \
+ SC(named_load_inline, V8.NamedLoadInline) \
+ SC(named_load_inline_miss, V8.NamedLoadInlineMiss) \
+ SC(named_load_global_inline, V8.NamedLoadGlobalInline) \
+ SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss) \
+ SC(keyed_store_field, V8.KeyedStoreField) \
+ SC(keyed_store_inline, V8.KeyedStoreInline) \
+ SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
+ SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
+ SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
+ SC(call_const, V8.CallConst) \
+ SC(call_const_fast_api, V8.CallConstFastApi) \
+ SC(call_const_interceptor, V8.CallConstInterceptor) \
+ SC(call_const_interceptor_fast_api, V8.CallConstInterceptorFastApi) \
+ SC(call_global_inline, V8.CallGlobalInline) \
+ SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \
+ SC(constructed_objects, V8.ConstructedObjects) \
+ SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
+ SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
+ SC(array_function_runtime, V8.ArrayFunctionRuntime) \
+ SC(array_function_native, V8.ArrayFunctionNative) \
+ SC(for_in, V8.ForIn) \
+ SC(enum_cache_hits, V8.EnumCacheHits) \
+ SC(enum_cache_misses, V8.EnumCacheMisses) \
+ SC(reloc_info_count, V8.RelocInfoCount) \
+ SC(reloc_info_size, V8.RelocInfoSize) \
+ SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
+ SC(compute_entry_frame, V8.ComputeEntryFrame) \
+ SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
+ SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs) \
+ SC(string_add_runtime, V8.StringAddRuntime) \
+ SC(string_add_native, V8.StringAddNative) \
+ SC(sub_string_runtime, V8.SubStringRuntime) \
+ SC(sub_string_native, V8.SubStringNative) \
+ SC(string_compare_native, V8.StringCompareNative) \
+ SC(string_compare_runtime, V8.StringCompareRuntime) \
+ SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
+ SC(regexp_entry_native, V8.RegExpEntryNative) \
+ SC(number_to_string_native, V8.NumberToStringNative) \
+ SC(number_to_string_runtime, V8.NumberToStringRuntime)
// This file contains all the v8 counters that are in use.
class Counters : AllStatic {
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 3bec827aa..395336117 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -114,8 +114,11 @@ bool V8::Initialize(Deserializer *des) {
OProfileAgent::Initialize();
- if (FLAG_log_code) {
+ // If we are deserializing, log non-function code objects and compiled
+ // functions found in the snapshot.
+ if (des != NULL && FLAG_log_code) {
HandleScope scope;
+ LOG(LogCodeObjects());
LOG(LogCompiledFunctions());
}
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 74750653d..6a32d7bdd 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -56,7 +56,7 @@ function InstallFunctions(object, attributes, functions) {
%FunctionSetName(f, key);
%SetProperty(object, key, f, attributes);
}
- %TransformToFastProperties(object);
+ %ToFastProperties(object);
}
// Emulates JSC by installing functions on a hidden prototype that
@@ -307,7 +307,7 @@ function IsInconsistentDescriptor(desc) {
// ES5 8.10.4
function FromPropertyDescriptor(desc) {
- if(IS_UNDEFINED(desc)) return desc;
+ if (IS_UNDEFINED(desc)) return desc;
var obj = new $Object();
if (IsDataDescriptor(desc)) {
obj.value = desc.getValue();
@@ -333,7 +333,6 @@ function ToPropertyDescriptor(obj) {
desc.setEnumerable(ToBoolean(obj.enumerable));
}
-
if ("configurable" in obj) {
desc.setConfigurable(ToBoolean(obj.configurable));
}
@@ -377,7 +376,9 @@ function PropertyDescriptor() {
this.writable_ = false;
this.hasWritable_ = false;
this.enumerable_ = false;
+ this.hasEnumerable_ = false;
this.configurable_ = false;
+ this.hasConfigurable_ = false;
this.get_ = void 0;
this.hasGetter_ = false;
this.set_ = void 0;
@@ -396,8 +397,14 @@ PropertyDescriptor.prototype.getValue = function() {
}
+PropertyDescriptor.prototype.hasValue = function() {
+ return this.hasValue_;
+}
+
+
PropertyDescriptor.prototype.setEnumerable = function(enumerable) {
this.enumerable_ = enumerable;
+ this.hasEnumerable_ = true;
}
@@ -406,6 +413,11 @@ PropertyDescriptor.prototype.isEnumerable = function () {
}
+PropertyDescriptor.prototype.hasEnumerable = function() {
+ return this.hasEnumerable_;
+}
+
+
PropertyDescriptor.prototype.setWritable = function(writable) {
this.writable_ = writable;
this.hasWritable_ = true;
@@ -419,6 +431,12 @@ PropertyDescriptor.prototype.isWritable = function() {
PropertyDescriptor.prototype.setConfigurable = function(configurable) {
this.configurable_ = configurable;
+ this.hasConfigurable_ = true;
+}
+
+
+PropertyDescriptor.prototype.hasConfigurable = function() {
+ return this.hasConfigurable_;
}
@@ -438,6 +456,11 @@ PropertyDescriptor.prototype.getGet = function() {
}
+PropertyDescriptor.prototype.hasGetter = function() {
+ return this.hasGetter_;
+}
+
+
PropertyDescriptor.prototype.setSet = function(set) {
this.set_ = set;
this.hasSetter_ = true;
@@ -449,6 +472,12 @@ PropertyDescriptor.prototype.getSet = function() {
}
+PropertyDescriptor.prototype.hasSetter = function() {
+ return this.hasSetter_;
+}
+
+
// ES5 section 8.12.1.
function GetOwnProperty(obj, p) {
var desc = new PropertyDescriptor();
@@ -458,8 +487,7 @@ function GetOwnProperty(obj, p) {
// obj is an accessor [true, Get, Set, Enumerable, Configurable]
var props = %GetOwnProperty(ToObject(obj), ToString(p));
- if (IS_UNDEFINED(props))
- return void 0;
+ if (IS_UNDEFINED(props)) return void 0;
// This is an accessor
if (props[0]) {
@@ -476,16 +504,89 @@ function GetOwnProperty(obj, p) {
}
-// ES5 8.12.9. This version cannot cope with the property p already
-// being present on obj.
+// ES5 section 8.12.2.
+function GetProperty(obj, p) {
+  var prop = GetOwnProperty(obj, p);
+ if (!IS_UNDEFINED(prop)) return prop;
+ var proto = obj.__proto__;
+ if (IS_NULL(proto)) return void 0;
+ return GetProperty(proto, p);
+}
+
+
+// ES5 section 8.12.6
+function HasProperty(obj, p) {
+ var desc = GetProperty(obj, p);
+  return !IS_UNDEFINED(desc);
+}
+
+
+// ES5 8.12.9.
function DefineOwnProperty(obj, p, desc, should_throw) {
- var flag = desc.isEnumerable() ? 0 : DONT_ENUM;
- if (IsDataDescriptor(desc)) {
- flag |= desc.isWritable() ? 0 : (DONT_DELETE | READ_ONLY);
- %SetProperty(obj, p, desc.getValue(), flag);
+ var current = GetOwnProperty(obj, p);
+ var extensible = %IsExtensible(ToObject(obj));
+
+ // Error handling according to spec.
+ // Step 3
+ if (IS_UNDEFINED(current) && !extensible)
+ throw MakeTypeError("define_disallowed", ["defineProperty"]);
+
+ if (!IS_UNDEFINED(current) && !current.isConfigurable()) {
+ // Step 7
+ if (desc.isConfigurable() || desc.isEnumerable() != current.isEnumerable())
+ throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ // Step 9
+ if (IsDataDescriptor(current) != IsDataDescriptor(desc))
+ throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ // Step 10
+ if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
+ if (!current.isWritable() && desc.isWritable())
+ throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ if (!current.isWritable() && desc.hasValue() &&
+ !SameValue(desc.getValue(), current.getValue())) {
+ throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ }
+ }
+ // Step 11
+ if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
+    if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())) {
+      throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+    }
+    if (desc.hasGetter() && !SameValue(desc.getGet(), current.getGet())) {
+      throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+    }
+ }
+ }
+
+  // Send the flags - enumerable and configurable are common - writable is
+  // only sent to the data descriptor.
+  // Take special care if enumerable or configurable is not defined on
+  // desc (we need to preserve the existing values from current).
+ var flag = NONE;
+ if (desc.hasEnumerable()) {
+ flag |= desc.isEnumerable() ? 0 : DONT_ENUM;
+ } else if (!IS_UNDEFINED(current)) {
+ flag |= current.isEnumerable() ? 0 : DONT_ENUM;
} else {
- if (IS_FUNCTION(desc.getGet())) %DefineAccessor(obj, p, GETTER, desc.getGet(), flag);
- if (IS_FUNCTION(desc.getSet())) %DefineAccessor(obj, p, SETTER, desc.getSet(), flag);
+ flag |= DONT_ENUM;
+ }
+
+ if (desc.hasConfigurable()) {
+ flag |= desc.isConfigurable() ? 0 : DONT_DELETE;
+ } else if (!IS_UNDEFINED(current)) {
+ flag |= current.isConfigurable() ? 0 : DONT_DELETE;
+  } else {
+    flag |= DONT_DELETE;
+  }
+
+ if (IsDataDescriptor(desc) || IsGenericDescriptor(desc)) {
+ flag |= desc.isWritable() ? 0 : READ_ONLY;
+ %DefineOrRedefineDataProperty(obj, p, desc.getValue(), flag);
+ } else {
+ if (desc.hasGetter() && IS_FUNCTION(desc.getGet())) {
+ %DefineOrRedefineAccessorProperty(obj, p, GETTER, desc.getGet(), flag);
+ }
+ if (desc.hasSetter() && IS_FUNCTION(desc.getSet())) {
+ %DefineOrRedefineAccessorProperty(obj, p, SETTER, desc.getSet(), flag);
+ }
}
return true;
}
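
The rewritten DefineOwnProperty can now redefine existing properties, rejecting only the transitions ES5 8.12.9 forbids on non-configurable properties. A small JavaScript illustration of the behaviour the new checks produce:

    var o = {};
    Object.defineProperty(o, "x", {
      value: 1, writable: false, configurable: false
    });

    // Changing the value of a non-writable, non-configurable property
    // hits the SameValue check in step 10 and throws redefine_disallowed:
    try {
      Object.defineProperty(o, "x", { value: 2 });  // TypeError
    } catch (e) {}

    // Attributes absent from the descriptor fall back to the current
    // values (the hasEnumerable/hasConfigurable bookkeeping above), so
    // redefining with the same value is permitted:
    Object.defineProperty(o, "x", { value: 1 });
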
@@ -522,9 +623,8 @@ function ObjectGetOwnPropertyNames(obj) {
if (%GetInterceptorInfo(obj) & 1) {
var indexedInterceptorNames =
%GetIndexedInterceptorElementNames(obj);
- if (indexedInterceptorNames) {
+ if (indexedInterceptorNames)
propertyNames = propertyNames.concat(indexedInterceptorNames);
- }
}
// Find all the named properties.
@@ -542,6 +642,10 @@ function ObjectGetOwnPropertyNames(obj) {
}
}
+ // Property names are expected to be strings.
+ for (var i = 0; i < propertyNames.length; ++i)
+ propertyNames[i] = ToString(propertyNames[i]);
+
return propertyNames;
}
@@ -558,10 +662,21 @@ function ObjectCreate(proto, properties) {
}
-// ES5 section 15.2.3.7. This version cannot cope with the properies already
-// being present on obj. Therefore it is not exposed as
-// Object.defineProperties yet.
+// ES5 section 15.2.3.6.
+function ObjectDefineProperty(obj, p, attributes) {
+ if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+ throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]);
+ var name = ToString(p);
+ var desc = ToPropertyDescriptor(attributes);
+ DefineOwnProperty(obj, name, desc, true);
+ return obj;
+}
+
+
+// ES5 section 15.2.3.7.
function ObjectDefineProperties(obj, properties) {
+ if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+ throw MakeTypeError("obj_ctor_property_non_object", ["defineProperties"]);
var props = ToObject(properties);
var key_values = [];
for (var key in props) {
@@ -577,6 +692,7 @@ function ObjectDefineProperties(obj, properties) {
var desc = key_values[i + 1];
DefineOwnProperty(obj, key, desc, true);
}
+ return obj;
}
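
With receiver validation and a return value in place, the two new entry points compose in the usual ES5 way:

    var point = Object.defineProperties({}, {
      x: { value: 1, enumerable: true },
      y: { value: 2, enumerable: true }
    });
    Object.defineProperty(point, "dims", { value: 2 });  // returns point

    // Non-object receivers throw obj_ctor_property_non_object:
    // Object.defineProperty(42, "x", {});  // TypeError
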
@@ -611,6 +727,8 @@ function SetupObject() {
InstallFunctions($Object, DONT_ENUM, $Array(
"keys", ObjectKeys,
"create", ObjectCreate,
+ "defineProperty", ObjectDefineProperty,
+ "defineProperties", ObjectDefineProperties,
"getPrototypeOf", ObjectGetPrototypeOf,
"getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
"getOwnPropertyNames", ObjectGetOwnPropertyNames
@@ -796,7 +914,7 @@ function SetupNumber() {
"POSITIVE_INFINITY",
1/0,
DONT_ENUM | DONT_DELETE | READ_ONLY);
- %TransformToFastProperties($Number);
+ %ToFastProperties($Number);
// Setup non-enumerable functions on the Number prototype object.
InstallFunctions($Number.prototype, DONT_ENUM, $Array(
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 7583d1ca0..717bcc3e7 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 1
-#define BUILD_NUMBER 0
-#define PATCH_LEVEL 0
+#define BUILD_NUMBER 1
+#define PATCH_LEVEL 1
#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build put a specific SONAME into the
diff --git a/deps/v8/src/virtual-frame.cc b/deps/v8/src/virtual-frame.cc
index 44e5fae44..3624e254b 100644
--- a/deps/v8/src/virtual-frame.cc
+++ b/deps/v8/src/virtual-frame.cc
@@ -48,7 +48,13 @@ VirtualFrame::VirtualFrame(VirtualFrame* original)
}
-FrameElement VirtualFrame::CopyElementAt(int index) {
+// Create a duplicate of an existing valid frame element.
+// We can pass an optional number type information that will override the
+// existing information about the backing element. The new information must
+// not conflict with the existing type information and must be equally or
+// more precise. The default parameter value kUninitialized means that there
+// is no additional information.
+FrameElement VirtualFrame::CopyElementAt(int index, NumberInfo::Type info) {
ASSERT(index >= 0);
ASSERT(index < element_count());
@@ -71,15 +77,26 @@ FrameElement VirtualFrame::CopyElementAt(int index) {
// Fall through.
case FrameElement::MEMORY: // Fall through.
- case FrameElement::REGISTER:
+ case FrameElement::REGISTER: {
// All copies are backed by memory or register locations.
result.set_type(FrameElement::COPY);
result.clear_copied();
result.clear_sync();
result.set_index(index);
elements_[index].set_copied();
+ // Update backing element's number information.
+ NumberInfo::Type existing = elements_[index].number_info();
+ ASSERT(existing != NumberInfo::kUninitialized);
+ // Assert that the new type information (a) does not conflict with the
+ // existing one and (b) is equally or more precise.
+ ASSERT((info == NumberInfo::kUninitialized) ||
+ (existing | info) != NumberInfo::kUninitialized);
+ ASSERT(existing <= info);
+ elements_[index].set_number_info(info != NumberInfo::kUninitialized
+ ? info
+ : existing);
break;
-
+ }
case FrameElement::INVALID:
// We should not try to copy invalid elements.
UNREACHABLE();
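
The asserts encode a refinement rule: incoming type information may only narrow what is already known about the backing element, and the two concrete kinds, smi and heap number, are disjoint. A sketch with assumed lattice constants (the real values live in number-info.h, so treat these as illustrative):

    var NumberInfo = {
      kUnknown: 0, kNumber: 1, kSmi: 3, kHeapNumber: 5, kUninitialized: 7
    };

    function refine(existing, info) {
      if (info === NumberInfo.kUninitialized) return existing;  // nothing new
      // Under these constants kSmi | kHeapNumber === kUninitialized,
      // so the OR detects a clash between the two disjoint kinds.
      if ((existing | info) === NumberInfo.kUninitialized) {
        throw new Error("conflicting number info");
      }
      if (existing > info) throw new Error("refinement lost precision");
      return info;
    }
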
@@ -98,7 +115,7 @@ void VirtualFrame::Adjust(int count) {
ASSERT(stack_pointer_ == element_count() - 1);
for (int i = 0; i < count; i++) {
- elements_.Add(FrameElement::MemoryElement());
+ elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
}
stack_pointer_ += count;
}
@@ -144,8 +161,16 @@ void VirtualFrame::SpillElementAt(int index) {
if (!elements_[index].is_valid()) return;
SyncElementAt(index);
+ // Number type information is preserved.
+ // Copies get their number information from their backing element.
+ NumberInfo::Type info;
+ if (!elements_[index].is_copy()) {
+ info = elements_[index].number_info();
+ } else {
+ info = elements_[elements_[index].index()].number_info();
+ }
// The element is now in memory. Its copied flag is preserved.
- FrameElement new_element = FrameElement::MemoryElement();
+ FrameElement new_element = FrameElement::MemoryElement(info);
if (elements_[index].is_copied()) {
new_element.set_copied();
}
@@ -268,7 +293,6 @@ void VirtualFrame::SetElementAt(int index, Result* value) {
InvalidateFrameSlotAt(frame_index);
- FrameElement new_element;
if (value->is_register()) {
if (is_used(value->reg())) {
// The register already appears on the frame. Either the existing
@@ -301,7 +325,8 @@ void VirtualFrame::SetElementAt(int index, Result* value) {
Use(value->reg(), frame_index);
elements_[frame_index] =
FrameElement::RegisterElement(value->reg(),
- FrameElement::NOT_SYNCED);
+ FrameElement::NOT_SYNCED,
+ value->number_info());
}
} else {
ASSERT(value->is_constant());
@@ -318,16 +343,15 @@ void VirtualFrame::PushFrameSlotAt(int index) {
}
-void VirtualFrame::Push(Register reg) {
+void VirtualFrame::Push(Register reg, NumberInfo::Type info) {
if (is_used(reg)) {
int index = register_location(reg);
- FrameElement element = CopyElementAt(index);
+ FrameElement element = CopyElementAt(index, info);
elements_.Add(element);
} else {
Use(reg, element_count());
FrameElement element =
- FrameElement::RegisterElement(reg,
- FrameElement::NOT_SYNCED);
+ FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED, info);
elements_.Add(element);
}
}
diff --git a/deps/v8/src/virtual-frame.h b/deps/v8/src/virtual-frame.h
index 0bf0ca255..220823ed4 100644
--- a/deps/v8/src/virtual-frame.h
+++ b/deps/v8/src/virtual-frame.h
@@ -37,6 +37,8 @@
#include "x64/virtual-frame-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/virtual-frame-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/virtual-frame-mips.h"
#else
#error Unsupported target architecture.
#endif
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 9cfe98abe..1f97235ab 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -224,7 +224,7 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// -----------------------------------------------------------------------------
// Implementation of Operand
-Operand::Operand(Register base, int32_t disp): rex_(0) {
+Operand::Operand(Register base, int32_t disp) : rex_(0) {
len_ = 1;
if (base.is(rsp) || base.is(r12)) {
// SIB byte is needed to encode (rsp + offset) or (r12 + offset).
@@ -246,7 +246,7 @@ Operand::Operand(Register base, int32_t disp): rex_(0) {
Operand::Operand(Register base,
Register index,
ScaleFactor scale,
- int32_t disp): rex_(0) {
+ int32_t disp) : rex_(0) {
ASSERT(!index.is(rsp));
len_ = 1;
set_sib(scale, index, base);
@@ -264,8 +264,19 @@ Operand::Operand(Register base,
}
+Operand::Operand(Register index,
+ ScaleFactor scale,
+ int32_t disp) : rex_(0) {
+ ASSERT(!index.is(rsp));
+ len_ = 1;
+ set_modrm(0, rsp);
+ set_sib(scale, index, rbp);
+ set_disp32(disp);
+}
+
+
// -----------------------------------------------------------------------------
-// Implementation of Assembler
+// Implementation of Assembler.
#ifdef GENERATED_CODE_COVERAGE
static void InitCoverageLog();
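
The new Operand(index, scale, disp) constructor leans on two x86-64 encoding conventions: an r/m field of 100 (rsp's register code) means "a SIB byte follows", and a SIB base of 101 (rbp's code) under mod 00 means "no base register, 32-bit displacement only". A JavaScript sketch of the two bytes it produces for [index*scale + disp]:

    function encodeScaledIndex(scale, indexReg) {
      // mod = 00, reg field filled in by the instruction, r/m = 100 (rsp)
      var modrm = (0 << 6) | 4;
      // scale in bits 7..6, index in bits 5..3, base = 101 (rbp) in 2..0
      var sib = (scale << 6) | ((indexReg & 7) << 3) | 5;
      return [modrm, sib];  // a 4-byte displacement always follows
    }
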
@@ -276,7 +287,7 @@ byte* Assembler::spare_buffer_ = NULL;
Assembler::Assembler(void* buffer, int buffer_size)
: code_targets_(100) {
if (buffer == NULL) {
- // do our own buffer management
+ // Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
@@ -293,7 +304,7 @@ Assembler::Assembler(void* buffer, int buffer_size)
buffer_size_ = buffer_size;
own_buffer_ = true;
} else {
- // use externally provided buffer instead
+ // Use externally provided buffer instead.
ASSERT(buffer_size > 0);
buffer_ = static_cast<byte*>(buffer);
buffer_size_ = buffer_size;
@@ -309,7 +320,7 @@ Assembler::Assembler(void* buffer, int buffer_size)
}
#endif
- // setup buffer pointers
+ // Setup buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -337,11 +348,10 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
- // finalize code
- // (at this point overflow() may be true, but the gap ensures that
- // we are still not overlapping instructions and relocation info)
- ASSERT(pc_ <= reloc_info_writer.pos()); // no overlap
- // setup desc
+ // Finalize code (at this point overflow() may be true, but the gap ensures
+ // that we are still not overlapping instructions and relocation info).
+ ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
+ // Setup code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
@@ -370,7 +380,7 @@ void Assembler::bind_to(Label* L, int pos) {
int current = L->pos();
int next = long_at(current);
while (next != current) {
- // relative address, relative to point after address
+ // Relative address, relative to point after address.
int imm32 = pos - (current + sizeof(int32_t));
long_at_put(current, imm32);
current = next;
@@ -390,10 +400,10 @@ void Assembler::bind(Label* L) {
void Assembler::GrowBuffer() {
- ASSERT(buffer_overflow()); // should not call this otherwise
+ ASSERT(buffer_overflow());
if (!own_buffer_) FATAL("external code buffer is too small");
- // compute new buffer size
+ // Compute new buffer size.
CodeDesc desc; // the new buffer
if (buffer_size_ < 4*KB) {
desc.buffer_size = 4*KB;
@@ -407,7 +417,7 @@ void Assembler::GrowBuffer() {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
- // setup new buffer
+ // Setup new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
desc.reloc_size =
@@ -419,7 +429,7 @@ void Assembler::GrowBuffer() {
memset(desc.buffer, 0xCC, desc.buffer_size);
#endif
- // copy the data
+ // Copy the data.
intptr_t pc_delta = desc.buffer - buffer_;
intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
(buffer_ + buffer_size_);
@@ -427,7 +437,7 @@ void Assembler::GrowBuffer() {
memmove(rc_delta + reloc_info_writer.pos(),
reloc_info_writer.pos(), desc.reloc_size);
- // switch buffers
+ // Switch buffers.
if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
spare_buffer_ = buffer_;
} else {
@@ -442,7 +452,7 @@ void Assembler::GrowBuffer() {
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
- // relocate runtime entries
+ // Relocate runtime entries.
for (RelocIterator it(desc); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode();
if (rmode == RelocInfo::INTERNAL_REFERENCE) {
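
GrowBuffer's contract, visible across these hunks: allocate a bigger buffer, copy the instructions from the front and the relocation info from the back, then walk the relocation records and rebase only INTERNAL_REFERENCE entries by the distance the buffer moved. A toy JavaScript sketch of that final fix-up step, with arrays standing in for raw memory (the names are illustrative):

    function rebaseInternalRefs(code, internalRefSlots, oldBase, newBase) {
      var pcDelta = newBase - oldBase;
      for (var i = 0; i < internalRefSlots.length; i++) {
        // Each slot holds an absolute address into the old buffer.
        code[internalRefSlots[i]] += pcDelta;
      }
      return code;
    }
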
@@ -472,7 +482,7 @@ void Assembler::emit_operand(int code, const Operand& adr) {
}
-// Assembler Instruction implementations
+// Assembler Instruction implementations.
void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
EnsureSpace ensure_space(this);
@@ -756,7 +766,7 @@ void Assembler::bts(const Operand& dst, Register src) {
void Assembler::call(Label* L) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- // 1110 1000 #32-bit disp
+ // 1110 1000 #32-bit disp.
emit(0xE8);
if (L->is_bound()) {
int offset = L->pos() - pc_offset() - sizeof(int32_t);
@@ -777,7 +787,7 @@ void Assembler::call(Label* L) {
void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- // 1110 1000 #32-bit disp
+ // 1110 1000 #32-bit disp.
emit(0xE8);
emit_code_target(target, rmode);
}
@@ -786,7 +796,7 @@ void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
void Assembler::call(Register adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- // Opcode: FF /2 r64
+ // Opcode: FF /2 r64.
if (adr.high_bit()) {
emit_rex_64(adr);
}
@@ -798,7 +808,7 @@ void Assembler::call(Register adr) {
void Assembler::call(const Operand& op) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- // Opcode: FF /2 m64
+ // Opcode: FF /2 m64.
emit_rex_64(op);
emit(0xFF);
emit_operand(2, op);
@@ -829,7 +839,7 @@ void Assembler::cmovq(Condition cc, Register dst, Register src) {
ASSERT(cc >= 0); // Use mov for unconditional moves.
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- // Opcode: REX.W 0f 40 + cc /r
+ // Opcode: REX.W 0f 40 + cc /r.
emit_rex_64(dst, src);
emit(0x0f);
emit(0x40 + cc);
@@ -846,7 +856,7 @@ void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
ASSERT(cc >= 0);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- // Opcode: REX.W 0f 40 + cc /r
+ // Opcode: REX.W 0f 40 + cc /r.
emit_rex_64(dst, src);
emit(0x0f);
emit(0x40 + cc);
@@ -863,7 +873,7 @@ void Assembler::cmovl(Condition cc, Register dst, Register src) {
ASSERT(cc >= 0);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- // Opcode: 0f 40 + cc /r
+ // Opcode: 0f 40 + cc /r.
emit_optional_rex_32(dst, src);
emit(0x0f);
emit(0x40 + cc);
@@ -880,7 +890,7 @@ void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
ASSERT(cc >= 0);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- // Opcode: 0f 40 + cc /r
+ // Opcode: 0f 40 + cc /r.
emit_optional_rex_32(dst, src);
emit(0x0f);
emit(0x40 + cc);
@@ -1110,17 +1120,17 @@ void Assembler::j(Condition cc, Label* L) {
int offs = L->pos() - pc_offset();
ASSERT(offs <= 0);
if (is_int8(offs - short_size)) {
- // 0111 tttn #8-bit disp
+ // 0111 tttn #8-bit disp.
emit(0x70 | cc);
emit((offs - short_size) & 0xFF);
} else {
- // 0000 1111 1000 tttn #32-bit disp
+ // 0000 1111 1000 tttn #32-bit disp.
emit(0x0F);
emit(0x80 | cc);
emitl(offs - long_size);
}
} else if (L->is_linked()) {
- // 0000 1111 1000 tttn #32-bit disp
+ // 0000 1111 1000 tttn #32-bit disp.
emit(0x0F);
emit(0x80 | cc);
emitl(L->pos());
@@ -1142,7 +1152,7 @@ void Assembler::j(Condition cc,
EnsureSpace ensure_space(this);
last_pc_ = pc_;
ASSERT(is_uint4(cc));
- // 0000 1111 1000 tttn #32-bit disp
+ // 0000 1111 1000 tttn #32-bit disp.
emit(0x0F);
emit(0x80 | cc);
emit_code_target(target, rmode);
@@ -1156,21 +1166,21 @@ void Assembler::jmp(Label* L) {
int offs = L->pos() - pc_offset() - 1;
ASSERT(offs <= 0);
if (is_int8(offs - sizeof(int8_t))) {
- // 1110 1011 #8-bit disp
+ // 1110 1011 #8-bit disp.
emit(0xEB);
emit((offs - sizeof(int8_t)) & 0xFF);
} else {
- // 1110 1001 #32-bit disp
+ // 1110 1001 #32-bit disp.
emit(0xE9);
emitl(offs - sizeof(int32_t));
}
} else if (L->is_linked()) {
- // 1110 1001 #32-bit disp
+ // 1110 1001 #32-bit disp.
emit(0xE9);
emitl(L->pos());
L->link_to(pc_offset() - sizeof(int32_t));
} else {
- // 1110 1001 #32-bit disp
+ // 1110 1001 #32-bit disp.
ASSERT(L->is_unused());
emit(0xE9);
int32_t current = pc_offset();
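
The pattern across these jump hunks is uniform: a bound label whose displacement fits in a signed byte gets the two-byte short form, everything else the five-byte near form, with the displacement measured from the end of the instruction. A simplified JavaScript sketch (the bookkeeping is condensed relative to the assembler's offs arithmetic):

    function emitJmp(offsetToTarget) {  // from the first byte of the jmp
      var rel8 = offsetToTarget - 2;    // short form is 2 bytes long
      if (rel8 >= -128 && rel8 < 128) {
        return [0xEB, rel8 & 0xFF];     // 1110 1011 #8-bit disp
      }
      var rel32 = offsetToTarget - 5;   // near form is 5 bytes long
      return [0xE9, rel32 & 0xFF, (rel32 >> 8) & 0xFF,
              (rel32 >> 16) & 0xFF, (rel32 >> 24) & 0xFF];  // #32-bit disp
    }
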
@@ -1183,7 +1193,7 @@ void Assembler::jmp(Label* L) {
void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- // 1110 1001 #32-bit disp
+ // 1110 1001 #32-bit disp.
emit(0xE9);
emit_code_target(target, rmode);
}
@@ -1192,7 +1202,7 @@ void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
void Assembler::jmp(Register target) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- // Opcode FF/4 r64
+ // Opcode FF/4 r64.
if (target.high_bit()) {
emit_rex_64(target);
}
@@ -1204,7 +1214,7 @@ void Assembler::jmp(Register target) {
void Assembler::jmp(const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- // Opcode FF/4 m64
+ // Opcode FF/4 m64.
emit_optional_rex_32(src);
emit(0xFF);
emit_operand(0x4, src);
@@ -1413,10 +1423,8 @@ void Assembler::movq(const Operand& dst, Immediate value) {
}
-/*
- * Loads the ip-relative location of the src label into the target
- * location (as a 32-bit offset sign extended to 64-bit).
- */
+// Loads the ip-relative location of the src label into the target location
+// (as a 32-bit offset sign extended to 64-bit).
void Assembler::movl(const Operand& dst, Label* src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2006,7 +2014,7 @@ void Assembler::testq(Register dst, Immediate mask) {
}
-// FPU instructions
+// FPU instructions.
void Assembler::fld(int i) {
@@ -2377,7 +2385,7 @@ void Assembler::emit_farith(int b1, int b2, int i) {
emit(b2 + i);
}
-// SSE 2 operations
+// SSE 2 operations.
void Assembler::movsd(const Operand& dst, XMMRegister src) {
EnsureSpace ensure_space(this);
@@ -2511,6 +2519,38 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0f);
+ emit(0x57);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::comisd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0f);
+ emit(0x2f);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0f);
+ emit(0x2e);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
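
The three new SSE2 instructions share the 66 0F prefix pair and differ only in the final opcode byte (57 for xorpd, 2F for comisd, 2E for ucomisd). For register-to-register operands the trailing ModRM byte is 0xC0 | (dst << 3) | src, as a quick JavaScript sketch shows:

    function sse2RegReg(opcode, dst, src) {  // xmm register codes 0..7
      return [0x66, 0x0F, opcode, 0xC0 | ((dst & 7) << 3) | (src & 7)];
    }

    // sse2RegReg(0x57, 0, 1) => [0x66, 0x0F, 0x57, 0xC1]  (xorpd xmm0, xmm1)
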
@@ -2527,7 +2567,7 @@ void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
}
-// Relocation information implementations
+// Relocation information implementations.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
ASSERT(rmode != RelocInfo::NONE);
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 3f2aef0e4..6c6f6a34b 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -113,8 +113,8 @@ struct Register {
return code_ & 0x7;
}
- // (unfortunately we can't make this private in a struct when initializing
- // by assignment.)
+ // Unfortunately we can't make this private in a struct when initializing
+ // by assignment.
int code_;
};
@@ -308,7 +308,6 @@ enum ScaleFactor {
times_4 = 2,
times_8 = 3,
times_int_size = times_4,
- times_half_pointer_size = times_4,
times_pointer_size = times_8
};
@@ -1122,6 +1121,10 @@ class Assembler : public Malloced {
void mulsd(XMMRegister dst, XMMRegister src);
void divsd(XMMRegister dst, XMMRegister src);
+ void xorpd(XMMRegister dst, XMMRegister src);
+
+ void comisd(XMMRegister dst, XMMRegister src);
+ void ucomisd(XMMRegister dst, XMMRegister src);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(XMMRegister reg, const Operand& adr);
@@ -1168,14 +1171,6 @@ class Assembler : public Malloced {
static const int kMaximalBufferSize = 512*MB;
static const int kMinimalBufferSize = 4*KB;
- protected:
- // void movsd(XMMRegister dst, const Operand& src);
- // void movsd(const Operand& dst, XMMRegister src);
-
- // void emit_sse_operand(XMMRegister reg, const Operand& adr);
- // void emit_sse_operand(XMMRegister dst, XMMRegister src);
-
-
private:
byte* addr_at(int pos) { return buffer_ + pos; }
byte byte_at(int pos) { return buffer_[pos]; }
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 0b95bba60..b3c5e33fd 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -185,14 +185,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Stack Layout:
- // rsp: return address
- // +1: Argument n
- // +2: Argument n-1
+ // rsp[0]: Return address
+ // rsp[1]: Argument n
+ // rsp[2]: Argument n-1
// ...
- // +n: Argument 1 = receiver
- // +n+1: Argument 0 = function to call
+ // rsp[n]: Argument 1
+ // rsp[n+1]: Receiver (function to call)
//
- // rax contains the number of arguments, n, not counting the function.
+ // rax contains the number of arguments, n, not counting the receiver.
//
// 1. Make sure we have at least one argument.
{ Label done;
@@ -205,31 +205,23 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ bind(&done);
}
- // 2. Get the function to call from the stack.
- { Label done, non_function, function;
- // The function to call is at position n+1 on the stack.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, +1 * kPointerSize));
- __ JumpIfSmi(rdi, &non_function);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(equal, &function);
-
- // Non-function called: Clear the function to force exception.
- __ bind(&non_function);
- __ xor_(rdi, rdi);
- __ jmp(&done);
+ // 2. Get the function to call (passed as receiver) from the stack, check
+ // if it is a function.
+ Label non_function;
+ // The function to call is at position n+1 on the stack.
+ __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ __ JumpIfSmi(rdi, &non_function);
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &non_function);
- // Function called: Change context eagerly to get the right global object.
- __ bind(&function);
+ // 3a. Patch the first argument if necessary when calling a function.
+ Label shift_arguments;
+ { Label convert_to_object, use_global_receiver, patch_receiver;
+ // Change context eagerly in case we need the global receiver.
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- __ bind(&done);
- }
-
- // 3. Make sure first argument is an object; convert if necessary.
- { Label call_to_object, use_global_receiver, patch_receiver, done;
__ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
-
- __ JumpIfSmi(rbx, &call_to_object);
+ __ JumpIfSmi(rbx, &convert_to_object);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
@@ -237,31 +229,28 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ j(equal, &use_global_receiver);
__ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &call_to_object);
+ __ j(below, &convert_to_object);
__ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
- __ j(below_equal, &done);
-
- __ bind(&call_to_object);
- __ EnterInternalFrame(); // preserves rax, rbx, rdi
+ __ j(below_equal, &shift_arguments);
- // Store the arguments count on the stack (smi tagged).
+ __ bind(&convert_to_object);
+ __ EnterInternalFrame(); // In order to preserve argument count.
__ Integer32ToSmi(rax, rax);
__ push(rax);
- __ push(rdi); // save edi across the call
__ push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ movq(rbx, rax);
- __ pop(rdi); // restore edi after the call
- // Get the arguments count and untag it.
__ pop(rax);
__ SmiToInteger32(rax, rax);
-
__ LeaveInternalFrame();
+ // Restore the function to rdi.
+ __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
__ jmp(&patch_receiver);
- // Use the global receiver object from the called function as the receiver.
+ // Use the global receiver object from the called function as the
+ // receiver.
__ bind(&use_global_receiver);
const int kGlobalIndex =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
@@ -273,48 +262,57 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ bind(&patch_receiver);
__ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);
- __ bind(&done);
+ __ jmp(&shift_arguments);
}
- // 4. Shift stuff one slot down the stack.
+
+ // 3b. Patch the first argument when calling a non-function. The
+ // CALL_NON_FUNCTION builtin expects the non-function callee as
+ // receiver, so overwrite the first argument which will ultimately
+ // become the receiver.
+ __ bind(&non_function);
+ __ movq(Operand(rsp, rax, times_pointer_size, 0), rdi);
+ __ xor_(rdi, rdi);
+
+ // 4. Shift arguments and return address one slot down on the stack
+ // (overwriting the original receiver). Adjust argument count to make
+ // the original first argument the new receiver.
+ __ bind(&shift_arguments);
{ Label loop;
- __ lea(rcx, Operand(rax, +1)); // +1 ~ copy receiver too
+ __ movq(rcx, rax);
__ bind(&loop);
__ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
__ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
__ decq(rcx);
- __ j(not_zero, &loop);
+ __ j(not_sign, &loop); // While non-negative (to copy return address).
+ __ pop(rbx); // Discard copy of return address.
+ __ decq(rax); // One fewer argument (first argument is new receiver).
}
- // 5. Remove TOS (copy of last arguments), but keep return address.
- __ pop(rbx);
- __ pop(rcx);
- __ push(rbx);
- __ decq(rax);
-
- // 6. Check that function really was a function and get the code to
- // call from the function and check that the number of expected
- // arguments matches what we're providing.
- { Label invoke, trampoline;
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+ { Label function;
__ testq(rdi, rdi);
- __ j(not_zero, &invoke);
+ __ j(not_zero, &function);
__ xor_(rbx, rbx);
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- __ bind(&trampoline);
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
-
- __ bind(&invoke);
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movsxlq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
- __ cmpq(rax, rbx);
- __ j(not_equal, &trampoline);
+ __ bind(&function);
}
- // 7. Jump (tail-call) to the code in register edx without checking arguments.
+ // 5b. Get the code to call from the function and check that the number of
+ // expected arguments matches what we're providing. If so, jump
+  //     (tail-call) to the code in register rdx without checking arguments.
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movsxlq(rbx,
+ FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ cmpq(rax, rbx);
+ __ j(not_equal,
+ Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
+
ParameterCount expected(0);
__ InvokeCode(rdx, expected, expected, JUMP_FUNCTION);
}
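
The rewritten trampoline folds receiver patching and the CALL_NON_FUNCTION case into one downward shift: every slot up to and including the return address moves one position, the duplicate return address is popped, and the argument count drops by one so the old first argument becomes the receiver. A JavaScript model of the stack, where index 0 is the top:

    function shiftArguments(stack, argc) {
      // stack[0] = return address, stack[1..argc] = arguments
      // (stack[argc] is argument 1), stack[argc + 1] = callee-as-receiver
      for (var i = argc; i >= 0; i--) stack[i + 1] = stack[i];
      stack.shift();    // discard the duplicated return address
      return argc - 1;  // argument 1 now sits in the receiver slot
    }

    // shiftArguments(["ret", "arg2", "arg1", "callee"], 2)
    // leaves ["ret", "arg2", "arg1"] with argc === 1.
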
@@ -586,6 +584,7 @@ static void AllocateJSArray(MacroAssembler* masm,
JSFunction::kPrototypeOrInitialMapOffset));
// Check whether an empty sized array is requested.
+ __ SmiToInteger64(array_size, array_size);
__ testq(array_size, array_size);
__ j(not_zero, &not_empty);
@@ -605,7 +604,7 @@ static void AllocateJSArray(MacroAssembler* masm,
__ bind(&not_empty);
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
- times_half_pointer_size, // array_size is a smi.
+ times_pointer_size,
array_size,
result,
elements_array_end,
@@ -618,19 +617,20 @@ static void AllocateJSArray(MacroAssembler* masm,
// result: JSObject
// elements_array: initial map
// elements_array_end: start of next object
- // array_size: size of array (smi)
+ // array_size: size of array
__ bind(&allocated);
__ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
__ Move(elements_array, Factory::empty_fixed_array());
__ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
// Field JSArray::kElementsOffset is initialized later.
- __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
+ __ Integer32ToSmi(scratch, array_size);
+ __ movq(FieldOperand(result, JSArray::kLengthOffset), scratch);
// Calculate the location of the elements array and set elements array member
// of the JSArray.
// result: JSObject
// elements_array_end: start of next object
- // array_size: size of array (smi)
+ // array_size: size of array
__ lea(elements_array, Operand(result, JSArray::kSize));
__ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
@@ -638,9 +638,8 @@ static void AllocateJSArray(MacroAssembler* masm,
// result: JSObject
// elements_array: elements array
// elements_array_end: start of next object
- // array_size: size of array (smi)
+ // array_size: size of array
ASSERT(kSmiTag == 0);
- __ SmiToInteger64(array_size, array_size);
__ Move(FieldOperand(elements_array, JSObject::kMapOffset),
Factory::fixed_array_map());
Label not_empty_2, fill_array;
@@ -900,7 +899,11 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// rdi: called object
// rax: number of arguments
__ bind(&non_function_call);
- // Set expected number of arguments to zero (not changing eax).
+ // CALL_NON_FUNCTION expects the non-function constructor as receiver
+ // (instead of the original receiver from the call site). The receiver is
+ // stack element argc+1.
+ __ movq(Operand(rsp, rax, times_pointer_size, kPointerSize), rdi);
+ // Set expected number of arguments to zero (not changing rax).
__ movq(rbx, Immediate(0));
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 685c9286d..6c063f3e7 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -246,14 +246,10 @@ class FloatingPointHelper : public AllStatic {
// -----------------------------------------------------------------------------
// CodeGenerator implementation.
-CodeGenerator::CodeGenerator(MacroAssembler* masm,
- Handle<Script> script,
- bool is_eval)
- : is_eval_(is_eval),
- script_(script),
- deferred_(8),
+CodeGenerator::CodeGenerator(MacroAssembler* masm)
+ : deferred_(8),
masm_(masm),
- scope_(NULL),
+ info_(NULL),
frame_(NULL),
allocator_(NULL),
state_(NULL),
@@ -263,6 +259,9 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm,
}
+Scope* CodeGenerator::scope() { return info_->function()->scope(); }
+
+
void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals. The inevitable call
// will sync frame elements to memory anyway, so we do it eagerly to
@@ -278,16 +277,12 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void CodeGenerator::Generate(FunctionLiteral* function,
- Mode mode,
- CompilationInfo* info) {
+void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
// Record the position for debugging purposes.
- CodeForFunctionPosition(function);
- ZoneList<Statement*>* body = function->body();
+ CodeForFunctionPosition(info->function());
// Initialize state.
- ASSERT(scope_ == NULL);
- scope_ = function->scope();
+ info_ = info;
ASSERT(allocator_ == NULL);
RegisterAllocator register_allocator(this);
allocator_ = &register_allocator;
@@ -302,7 +297,7 @@ void CodeGenerator::Generate(FunctionLiteral* function,
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- function->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
frame_->SpillAll();
__ int3();
}
@@ -328,7 +323,7 @@ void CodeGenerator::Generate(FunctionLiteral* function,
frame_->AllocateStackSlots();
// Allocate the local context if needed.
- int heap_slots = scope_->num_heap_slots();
+ int heap_slots = scope()->num_heap_slots();
if (heap_slots > 0) {
Comment cmnt(masm_, "[ allocate local context");
// Allocate local context.
@@ -358,7 +353,6 @@ void CodeGenerator::Generate(FunctionLiteral* function,
// 3) don't copy parameter operand code from SlotOperand!
{
Comment cmnt2(masm_, "[ copy context parameters into .context");
-
// Note that iteration order is relevant here! If we have the same
// parameter twice (e.g., function (x, y, x)), and that parameter
// needs to be copied into the context, it must be the last argument
@@ -367,15 +361,15 @@ void CodeGenerator::Generate(FunctionLiteral* function,
// order: such a parameter is copied repeatedly into the same
// context location and thus the last value is what is seen inside
// the function.
- for (int i = 0; i < scope_->num_parameters(); i++) {
- Variable* par = scope_->parameter(i);
+ for (int i = 0; i < scope()->num_parameters(); i++) {
+ Variable* par = scope()->parameter(i);
Slot* slot = par->slot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
// The use of SlotOperand below is safe in unspilled code
// because the slot is guaranteed to be a context slot.
//
// There are no parameters in the global scope.
- ASSERT(!scope_->is_global_scope());
+ ASSERT(!scope()->is_global_scope());
frame_->PushParameterAt(i);
Result value = frame_->Pop();
value.ToRegister();
@@ -403,9 +397,9 @@ void CodeGenerator::Generate(FunctionLiteral* function,
}
// Initialize ThisFunction reference if present.
- if (scope_->is_function_scope() && scope_->function() != NULL) {
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
frame_->Push(Factory::the_hole_value());
- StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
}
} else {
// When used as the secondary compiler for splitting, rbp, rsi,
@@ -423,12 +417,12 @@ void CodeGenerator::Generate(FunctionLiteral* function,
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
- if (scope_->HasIllegalRedeclaration()) {
+ if (scope()->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ illegal redeclarations");
- scope_->VisitIllegalRedeclaration(this);
+ scope()->VisitIllegalRedeclaration(this);
} else {
Comment cmnt(masm_, "[ declarations");
- ProcessDeclarations(scope_->declarations());
+ ProcessDeclarations(scope()->declarations());
// Bail out if a stack-overflow exception occurred when processing
// declarations.
if (HasStackOverflow()) return;
@@ -443,7 +437,7 @@ void CodeGenerator::Generate(FunctionLiteral* function,
// Compile the body of the function in a vanilla state. Don't
// bother compiling all the code if the scope has an illegal
// redeclaration.
- if (!scope_->HasIllegalRedeclaration()) {
+ if (!scope()->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
bool is_builtin = Bootstrapper::IsActive();
@@ -454,14 +448,14 @@ void CodeGenerator::Generate(FunctionLiteral* function,
// Ignore the return value.
}
#endif
- VisitStatements(body);
+ VisitStatements(info->function()->body());
// Handle the return from the function.
if (has_valid_frame()) {
// If there is a valid frame, control flow can fall off the end of
// the body. In that case there is an implicit return statement.
ASSERT(!function_return_is_shadowed_);
- CodeForReturnPosition(function);
+ CodeForReturnPosition(info->function());
frame_->PrepareForReturn();
Result undefined(Factory::undefined_value());
if (function_return_.is_bound()) {
@@ -504,7 +498,6 @@ void CodeGenerator::Generate(FunctionLiteral* function,
// There is no need to delete the register allocator, it is a
// stack-allocated local.
allocator_ = NULL;
- scope_ = NULL;
}
void CodeGenerator::GenerateReturnSequence(Result* return_value) {
@@ -527,7 +520,7 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
// Leave the frame and return popping the arguments and the
// receiver.
frame_->Exit();
- masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
+ masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Add padding that will be overwritten by a debugger breakpoint.
// frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
@@ -695,7 +688,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
Load(receiver);
- LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
// Emit the source position information after having loaded the
// receiver and the arguments.
@@ -773,8 +766,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
__ j(equal, &adapted);
// No arguments adaptor frame. Copy fixed number of arguments.
- __ movq(rax, Immediate(scope_->num_parameters()));
- for (int i = 0; i < scope_->num_parameters(); i++) {
+ __ movq(rax, Immediate(scope()->num_parameters()));
+ for (int i = 0; i < scope()->num_parameters(); i++) {
__ push(frame_->ParameterAt(i));
}
__ jmp(&invoke);
@@ -1228,7 +1221,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
// Compare and branch to the body if true or the next test if
// false. Prefer the next test as a fall through.
ControlDestination dest(clause->body_target(), &next_test, false);
- Comparison(equal, true, &dest);
+ Comparison(node, equal, true, &dest);
// If the comparison fell through to the true target, jump to the
// actual body.
@@ -2225,8 +2218,7 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
// Spill everything, even constants, to the frame.
frame_->SpillAll();
- DebuggerStatementStub ces;
- frame_->CallStub(&ces, 0);
+ frame_->DebugBreak();
// Ignore the return value.
#endif
}
@@ -2263,7 +2255,7 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
// Build the function boilerplate and instantiate it.
Handle<JSFunction> boilerplate =
- Compiler::BuildBoilerplate(node, script_, this);
+ Compiler::BuildBoilerplate(node, script(), this);
// Check for stack-overflow exception.
if (HasStackOverflow()) return;
InstantiateBoilerplate(boilerplate);
@@ -2503,17 +2495,19 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
// Load the literals array of the function.
__ movq(literals.reg(),
FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
- // Literal array.
+
frame_->Push(&literals);
- // Literal index.
frame_->Push(Smi::FromInt(node->literal_index()));
- // Constant elements.
frame_->Push(node->constant_elements());
+ int length = node->values()->length();
Result clone;
if (node->depth() > 1) {
clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else {
+ } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ } else {
+ FastCloneShallowArrayStub stub(length);
+ clone = frame_->CallStub(&stub, 3);
}
frame_->Push(&clone);
@@ -2763,9 +2757,6 @@ void CodeGenerator::VisitCall(Call* node) {
// JavaScript example: 'foo(1, 2, 3)' // foo is global
// ----------------------------------
- // Push the name of the function and the receiver onto the stack.
- frame_->Push(var->name());
-
// Pass the global object as the receiver and let the IC stub
// patch the stack to use the global proxy as 'this' in the
// invoked function.
@@ -2777,6 +2768,9 @@ void CodeGenerator::VisitCall(Call* node) {
Load(args->at(i));
}
+ // Push the name of the function on the frame.
+ frame_->Push(var->name());
+
// Call the IC initialization code.
CodeForSourcePosition(node->position());
Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
@@ -2784,7 +2778,7 @@ void CodeGenerator::VisitCall(Call* node) {
loop_nesting());
frame_->RestoreContextRegister();
// Replace the function on the stack with the result.
- frame_->SetElementAt(0, &result);
+ frame_->Push(&result);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
@@ -2837,8 +2831,7 @@ void CodeGenerator::VisitCall(Call* node) {
node->position());
} else {
- // Push the name of the function and the receiver onto the stack.
- frame_->Push(name);
+ // Push the receiver onto the frame.
Load(property->obj());
// Load the arguments.
@@ -2847,14 +2840,16 @@ void CodeGenerator::VisitCall(Call* node) {
Load(args->at(i));
}
+ // Push the name of the function onto the frame.
+ frame_->Push(name);
+
// Call the IC initialization code.
CodeForSourcePosition(node->position());
Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
arg_count,
loop_nesting());
frame_->RestoreContextRegister();
- // Replace the function on the stack with the result.
- frame_->SetElementAt(0, &result);
+ frame_->Push(&result);
}
} else {
@@ -2945,8 +2940,6 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
Runtime::Function* function = node->function();
if (function == NULL) {
- // Prepare stack for calling JS runtime function.
- frame_->Push(node->name());
// Push the builtins object found in the current global object.
Result temp = allocator()->Allocate();
ASSERT(temp.is_valid());
@@ -2964,11 +2957,12 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (function == NULL) {
// Call the JS runtime function.
+ frame_->Push(node->name());
Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
arg_count,
loop_nesting_);
frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &answer);
+ frame_->Push(&answer);
} else {
// Call the C runtime function.
Result answer = frame_->CallRuntime(function, arg_count);
@@ -3077,7 +3071,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
case Token::SUB: {
GenericUnaryOpStub stub(Token::SUB, overwrite);
- // TODO(1222589): remove dependency of TOS being cached inside stub
Result operand = frame_->Pop();
Result answer = frame_->CallStub(&stub, &operand);
frame_->Push(&answer);
@@ -3593,7 +3586,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
Load(left);
Load(right);
- Comparison(cc, strict, destination());
+ Comparison(node, cc, strict, destination());
}
@@ -3610,7 +3603,7 @@ void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
Load(args->at(0));
Result key = frame_->Pop();
// Explicitly create a constant result.
- Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+ Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
// Call the shared stub to get to arguments[key].
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
Result result = frame_->CallStub(&stub, &key, &count);
@@ -3719,7 +3712,7 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
// ArgumentsAccessStub takes the parameter count as an input argument
   // in register rax. Create a constant result for it.
- Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+ Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
// Call the shared stub to get to the arguments.length.
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
Result result = frame_->CallStub(&stub, &count);
@@ -3978,6 +3971,17 @@ void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+
+ // Load the argument on the stack and jump to the runtime.
+ Load(args->at(0));
+
+ Result answer = frame_->CallRuntime(Runtime::kNumberToString, 1);
+ frame_->Push(&answer);
+}
+
+
void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
@@ -4267,34 +4271,52 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
// The value to convert should be popped from the frame.
Result value = frame_->Pop();
value.ToRegister();
- // Fast case checks.
- // 'false' => false.
- __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
- dest->false_target()->Branch(equal);
+ if (value.is_number()) {
+ Comment cmnt(masm_, "ONLY_NUMBER");
+ // Fast case if NumberInfo indicates only numbers.
+ if (FLAG_debug_code) {
+ __ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number.");
+ }
+ // Smi => false iff zero.
+ __ SmiCompare(value.reg(), Smi::FromInt(0));
+ dest->false_target()->Branch(equal);
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ dest->true_target()->Branch(is_smi);
+ __ fldz();
+ __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
+ __ FCmp();
+ value.Unuse();
+ dest->Split(not_zero);
+ } else {
+ // Fast case checks.
+ // 'false' => false.
+ __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
+ dest->false_target()->Branch(equal);
- // 'true' => true.
- __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
- dest->true_target()->Branch(equal);
+ // 'true' => true.
+ __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
+ dest->true_target()->Branch(equal);
- // 'undefined' => false.
- __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
- dest->false_target()->Branch(equal);
+ // 'undefined' => false.
+ __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
+ dest->false_target()->Branch(equal);
- // Smi => false iff zero.
- __ SmiCompare(value.reg(), Smi::FromInt(0));
- dest->false_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(value.reg());
- dest->true_target()->Branch(is_smi);
+ // Smi => false iff zero.
+ __ SmiCompare(value.reg(), Smi::FromInt(0));
+ dest->false_target()->Branch(equal);
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ dest->true_target()->Branch(is_smi);
- // Call the stub for all other cases.
- frame_->Push(&value); // Undo the Pop() from above.
- ToBooleanStub stub;
- Result temp = frame_->CallStub(&stub, 1);
- // Convert the result to a condition code.
- __ testq(temp.reg(), temp.reg());
- temp.Unuse();
- dest->Split(not_equal);
+ // Call the stub for all other cases.
+ frame_->Push(&value); // Undo the Pop() from above.
+ ToBooleanStub stub;
+ Result temp = frame_->CallStub(&stub, 1);
+ // Convert the result to a condition code.
+ __ testq(temp.reg(), temp.reg());
+ temp.Unuse();
+ dest->Split(not_equal);
+ }
}
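
The number-only fast path encodes the JavaScript truthiness rule for numbers directly: a number is falsy exactly when it is 0, -0, or NaN. The unordered FCmp outcome for NaN leaves ZF set, so the not_zero split routes NaN to the false target. A sketch of the semantics being emitted:

#include <cmath>

// A JS number converts to false iff it is 0, -0, or NaN; everything
// else, including infinities, converts to true.
bool NumberToBoolean(double value) {
  return value != 0.0 && !std::isnan(value);  // -0.0 == 0.0 under IEEE 754
}
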
@@ -4789,13 +4811,13 @@ void CodeGenerator::LoadGlobalReceiver() {
}
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
- if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
- ASSERT(scope_->arguments_shadow() != NULL);
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
+ if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
+ ASSERT(scope()->arguments_shadow() != NULL);
// We don't want to do lazy arguments allocation for functions that
   // have heap-allocated contexts, because it interferes with the
// uninitialized const tracking in the context objects.
- return (scope_->num_heap_slots() > 0)
+ return (scope()->num_heap_slots() > 0)
? EAGER_ARGUMENTS_ALLOCATION
: LAZY_ARGUMENTS_ALLOCATION;
}
@@ -4815,14 +4837,14 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
frame_->PushFunction();
frame_->PushReceiverSlotAddress();
- frame_->Push(Smi::FromInt(scope_->num_parameters()));
+ frame_->Push(Smi::FromInt(scope()->num_parameters()));
Result result = frame_->CallStub(&stub, 3);
frame_->Push(&result);
}
- Variable* arguments = scope_->arguments()->var();
- Variable* shadow = scope_->arguments_shadow()->var();
+ Variable* arguments = scope()->arguments()->var();
+ Variable* shadow = scope()->arguments_shadow()->var();
ASSERT(arguments != NULL && arguments->slot() != NULL);
ASSERT(shadow != NULL && shadow->slot() != NULL);
JumpTarget done;
@@ -4831,7 +4853,7 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
// We have to skip storing into the arguments slot if it has
   // already been written to. This can happen if the function
// has a local variable named 'arguments'.
- LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
Result probe = frame_->Pop();
if (probe.is_constant()) {
// We have to skip updating the arguments object if it has been
@@ -4875,7 +4897,8 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
}
-void CodeGenerator::Comparison(Condition cc,
+void CodeGenerator::Comparison(AstNode* node,
+ Condition cc,
bool strict,
ControlDestination* dest) {
// Strict only makes sense for equality comparisons.
@@ -4922,7 +4945,8 @@ void CodeGenerator::Comparison(Condition cc,
default:
UNREACHABLE();
}
- } else { // Only one side is a constant Smi.
+ } else {
+ // Only one side is a constant Smi.
// If left side is a constant Smi, reverse the operands.
// Since one side is a constant Smi, conversion order does not matter.
if (left_side_constant_smi) {
@@ -4936,6 +4960,8 @@ void CodeGenerator::Comparison(Condition cc,
// Implement comparison against a constant Smi, inlining the case
// where both sides are Smis.
left_side.ToRegister();
+ Register left_reg = left_side.reg();
+ Handle<Object> right_val = right_side.handle();
// Here we split control flow to the stub call and inlined cases
// before finally splitting it to the control destination. We use
@@ -4943,12 +4969,48 @@ void CodeGenerator::Comparison(Condition cc,
// the first split. We manually handle the off-frame references
// by reconstituting them on the non-fall-through path.
JumpTarget is_smi;
- Register left_reg = left_side.reg();
- Handle<Object> right_val = right_side.handle();
Condition left_is_smi = masm_->CheckSmi(left_side.reg());
is_smi.Branch(left_is_smi);
+ bool is_for_loop_compare = (node->AsCompareOperation() != NULL)
+ && node->AsCompareOperation()->is_for_loop_condition();
+ if (!is_for_loop_compare && right_val->IsSmi()) {
+ // Right side is a constant smi and left side has been checked
+ // not to be a smi.
+ JumpTarget not_number;
+ __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ not_number.Branch(not_equal, &left_side);
+ __ movsd(xmm1,
+ FieldOperand(left_reg, HeapNumber::kValueOffset));
+ int value = Smi::cast(*right_val)->value();
+ if (value == 0) {
+ __ xorpd(xmm0, xmm0);
+ } else {
+ Result temp = allocator()->Allocate();
+ __ movl(temp.reg(), Immediate(value));
+ __ cvtlsi2sd(xmm0, temp.reg());
+ temp.Unuse();
+ }
+ __ ucomisd(xmm1, xmm0);
+ // Jump to builtin for NaN.
+ not_number.Branch(parity_even, &left_side);
+ left_side.Unuse();
+ Condition double_cc = cc;
+ switch (cc) {
+ case less: double_cc = below; break;
+ case equal: double_cc = equal; break;
+ case less_equal: double_cc = below_equal; break;
+ case greater: double_cc = above; break;
+ case greater_equal: double_cc = above_equal; break;
+ default: UNREACHABLE();
+ }
+ dest->true_target()->Branch(double_cc);
+ dest->false_target()->Jump();
+ not_number.Bind(&left_side);
+ }
+
// Setup and call the compare stub.
CompareStub stub(cc, strict);
Result result = frame_->CallStub(&stub, &left_side, &right_side);
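
The double_cc remapping is needed because ucomisd reports its result the way an unsigned integer compare would, so the signed predicates used for smi comparisons must be replaced by their unsigned counterparts; an unordered (NaN) result raises PF, which the parity_even branch routes to the generic stub. The mapping, restated as standalone C++:

enum Cond { kLess, kEqual, kLessEqual, kGreater, kGreaterEqual,   // signed
            kBelow, kBelowEqual, kAbove, kAboveEqual };           // unsigned

// Signed condition -> condition valid on the flags ucomisd sets.
Cond DoubleCondition(Cond cc) {
  switch (cc) {
    case kLess:         return kBelow;
    case kEqual:        return kEqual;
    case kLessEqual:    return kBelowEqual;
    case kGreater:      return kAbove;
    case kGreaterEqual: return kAboveEqual;
    default:            return cc;  // unreachable in the hunk above
  }
}
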
@@ -5121,26 +5183,34 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
// Neither operand is known to be a string.
}
- bool left_is_smi = left.is_constant() && left.handle()->IsSmi();
- bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
- bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
- bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
+ bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
+ bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
+ bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
+ bool right_is_non_smi_constant =
+ right.is_constant() && !right.handle()->IsSmi();
- if (left_is_smi && right_is_smi) {
+ if (left_is_smi_constant && right_is_smi_constant) {
// Compute the constant result at compile time, and leave it on the frame.
int left_int = Smi::cast(*left.handle())->value();
int right_int = Smi::cast(*right.handle())->value();
if (FoldConstantSmis(op, left_int, right_int)) return;
}
+ // Get number type of left and right sub-expressions.
+ NumberInfo::Type operands_type =
+ NumberInfo::Combine(left.number_info(), right.number_info());
+
Result answer;
- if (left_is_non_smi || right_is_non_smi) {
- GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
+ if (left_is_non_smi_constant || right_is_non_smi_constant) {
+ GenericBinaryOpStub stub(op,
+ overwrite_mode,
+ NO_SMI_CODE_IN_STUB,
+ operands_type);
answer = stub.GenerateCall(masm_, frame_, &left, &right);
- } else if (right_is_smi) {
+ } else if (right_is_smi_constant) {
answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
type, false, overwrite_mode);
- } else if (left_is_smi) {
+ } else if (left_is_smi_constant) {
answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
type, true, overwrite_mode);
} else {
@@ -5152,10 +5222,62 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
} else {
- GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
+ GenericBinaryOpStub stub(op,
+ overwrite_mode,
+ NO_GENERIC_BINARY_FLAGS,
+ operands_type);
answer = stub.GenerateCall(masm_, frame_, &left, &right);
}
}
+
+ // Set NumberInfo of result according to the operation performed.
+ // We rely on the fact that smis have a 32 bit payload on x64.
+ ASSERT(kSmiValueSize == 32);
+ NumberInfo::Type result_type = NumberInfo::kUnknown;
+ switch (op) {
+ case Token::COMMA:
+ result_type = right.number_info();
+ break;
+ case Token::OR:
+ case Token::AND:
+ // Result type can be either of the two input types.
+ result_type = operands_type;
+ break;
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ // Result is always a smi.
+ result_type = NumberInfo::kSmi;
+ break;
+ case Token::SAR:
+ case Token::SHL:
+ // Result is always a smi.
+ result_type = NumberInfo::kSmi;
+ break;
+ case Token::SHR:
+ // Result of x >>> y is always a smi if y >= 1, otherwise a number.
+ result_type = (right.is_constant() && right.handle()->IsSmi()
+ && Smi::cast(*right.handle())->value() >= 1)
+ ? NumberInfo::kSmi
+ : NumberInfo::kNumber;
+ break;
+ case Token::ADD:
+ // Result could be a string or a number. Check types of inputs.
+ result_type = NumberInfo::IsNumber(operands_type)
+ ? NumberInfo::kNumber
+ : NumberInfo::kUnknown;
+ break;
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ // Result is always a number.
+ result_type = NumberInfo::kNumber;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ answer.set_number_info(result_type);
frame_->Push(&answer);
}
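
The switch above attaches a static type to the result of each binary operation. NumberInfo::Combine is presumably a join on a small type lattice in which kSmi refines kNumber and everything else collapses to kUnknown; a minimal sketch under that assumption (not V8's exact encoding):

enum class NumberType { kSmi, kNumber, kUnknown };

NumberType Combine(NumberType a, NumberType b) {
  if (a == b) return a;
  bool both_numeric = a != NumberType::kUnknown && b != NumberType::kUnknown;
  // The join of two numeric types is kNumber; mixing in kUnknown
  // discards all information.
  return both_numeric ? NumberType::kNumber : NumberType::kUnknown;
}
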
@@ -6228,6 +6350,63 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [rsp + kPointerSize]: constant elements.
+ // [rsp + (2 * kPointerSize)]: literal index.
+ // [rsp + (3 * kPointerSize)]: literals array.
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+ int size = JSArray::kSize + elements_size;
+
+ // Load boilerplate object into rcx and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ movq(rcx, Operand(rsp, 3 * kPointerSize));
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+ __ movq(rcx,
+ FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
+ __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &slow_case);
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rax, i), rbx);
+ }
+ }
+
+ if (length_ > 0) {
+ // Get hold of the elements array of the boilerplate and setup the
+ // elements pointer in the resulting object.
+ __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
+ __ lea(rdx, Operand(rax, JSArray::kSize));
+ __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
+
+ // Copy the elements array.
+ for (int i = 0; i < elements_size; i += kPointerSize) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rdx, i), rbx);
+ }
+ }
+
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
+
+ __ bind(&slow_case);
+ ExternalReference runtime(Runtime::kCreateArrayLiteralShallow);
+ __ TailCallRuntime(runtime, 3, 1);
+}
+
+
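
FastCloneShallowArrayStub folds the JSArray header and its elements into a single AllocateInNewSpace request, copies both word by word, and repoints the clone's elements field at its own copy. A heap-agnostic sketch of the same shape (stand-in layout, not V8's real object model):

#include <cstdint>
#include <cstdlib>
#include <cstring>

struct Array {
  uintptr_t map;        // stand-in for the map/hidden-class word
  uintptr_t* elements;  // backing store; placed right after the header
  size_t length;
};

Array* CloneShallow(const Array* boilerplate) {
  size_t elements_size = boilerplate->length * sizeof(uintptr_t);
  // One allocation covers header + elements: a single limit check.
  char* block =
      static_cast<char*>(std::malloc(sizeof(Array) + elements_size));
  Array* clone = reinterpret_cast<Array*>(block);
  clone->map = boilerplate->map;
  clone->length = boilerplate->length;
  // Point at the clone's own elements, not the boilerplate's.
  clone->elements = reinterpret_cast<uintptr_t*>(block + sizeof(Array));
  std::memcpy(clone->elements, boilerplate->elements, elements_size);
  return clone;
}
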
void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result, true_result, not_string;
__ movq(rax, Operand(rsp, 1 * kPointerSize));
@@ -7241,30 +7420,107 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // rsp[0] : return address
+ // rsp[8] : number of parameters
+ // rsp[16] : receiver displacement
+ // rsp[24] : function
+
// The displacement is used for skipping the return address and the
// frame pointer on the stack. It is the offset of the last
// parameter (if any) relative to the frame pointer.
static const int kDisplacement = 2 * kPointerSize;
// Check if the calling frame is an arguments adaptor frame.
- Label runtime;
+ Label adaptor_frame, try_allocate, runtime;
__ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &runtime);
- // Value in rcx is Smi encoded.
+ __ j(equal, &adaptor_frame);
+
+ // Get the length from the frame.
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
__ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ movq(Operand(rsp, 1 * kPointerSize), rcx);
- SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
+ // Do not clobber the length index for the indexing operation since
+  // it is used to compute the size for allocation later.
+ SmiIndex index = masm->SmiToIndex(rbx, rcx, kPointerSizeLog2);
__ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
__ movq(Operand(rsp, 2 * kPointerSize), rdx);
+ // Try the new space allocation. Start out with computing the size of
+ // the arguments object and the elements array.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ testq(rcx, rcx);
+ __ j(zero, &add_arguments_object);
+ index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
+ __ lea(rcx, Operand(index.reg, index.scale, FixedArray::kHeaderSize));
+ __ bind(&add_arguments_object);
+ __ addq(rcx, Immediate(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of both objects in one go.
+ __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current (global) context.
+ int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ __ movq(rdi, Operand(rdi, offset));
+
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ movq(kScratchRegister, FieldOperand(rdi, i));
+ __ movq(FieldOperand(rax, i), kScratchRegister);
+ }
+
+ // Setup the callee in-object property.
+ ASSERT(Heap::arguments_callee_index == 0);
+ __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ ASSERT(Heap::arguments_length_index == 1);
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ testq(rcx, rcx);
+ __ j(zero, &done);
+
+ // Get the parameters pointer from the stack and untag the length.
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ SmiToInteger32(rcx, rcx);
+
+ // Setup the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
+ __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+ __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
+ __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+
+ // Copy the fixed array slots.
+ Label loop;
+ __ bind(&loop);
+ __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
+ __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
+ __ addq(rdi, Immediate(kPointerSize));
+ __ subq(rdx, Immediate(kPointerSize));
+ __ decq(rcx);
+ __ j(not_zero, &loop);
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ ret(3 * kPointerSize);
+
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- Runtime::Function* f = Runtime::FunctionForId(Runtime::kNewArgumentsFast);
- __ TailCallRuntime(ExternalReference(f), 3, f->result_size);
+ __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
}
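
The new try_allocate path sizes the arguments object and its elements array together so that one AllocateInNewSpace call (and one limit check) covers both, then fills in the callee and length in-object properties and copies the actual parameters out of the caller's frame. A sketch of the size computation (constants stand in for Heap::kArgumentsObjectSize and FixedArray::kHeaderSize):

#include <cstddef>

// argc == 0 allocates the bare object with no elements array, exactly
// as the zero test before add_arguments_object does above.
size_t ArgumentsAllocationSize(size_t argc, size_t arguments_object_size,
                               size_t fixed_array_header_size) {
  const size_t kPointerSize = 8;  // x64
  size_t elements =
      (argc == 0) ? 0 : fixed_array_header_size + argc * kPointerSize;
  return arguments_object_size + elements;
}
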
@@ -7599,6 +7855,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Slow-case: Non-function called.
__ bind(&slow);
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
__ Set(rax, argc_);
__ Set(rbx, 0);
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
@@ -7987,13 +8246,14 @@ const char* GenericBinaryOpStub::GetName() {
}
OS::SNPrintF(Vector<char>(name_, len),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s",
+ "GenericBinaryOpStub_%s_%s%s_%s%s_%s%s",
op_name,
overwrite_name,
(flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
args_in_registers_ ? "RegArgs" : "StackArgs",
args_reversed_ ? "_R" : "",
- use_sse3_ ? "SSE3" : "SSE2");
+ use_sse3_ ? "SSE3" : "SSE2",
+ NumberInfo::ToString(operands_type_));
return name_;
}
@@ -8019,6 +8279,8 @@ void GenericBinaryOpStub::GenerateCall(
}
} else if (left.is(left_arg)) {
__ movq(right_arg, right);
+ } else if (right.is(right_arg)) {
+ __ movq(left_arg, left);
} else if (left.is(right_arg)) {
if (IsOperationCommutative()) {
__ movq(left_arg, right);
@@ -8037,8 +8299,6 @@ void GenericBinaryOpStub::GenerateCall(
__ movq(right_arg, right);
__ movq(left_arg, left);
}
- } else if (right.is(right_arg)) {
- __ movq(left_arg, left);
} else {
// Order of moves is not important.
__ movq(left_arg, left);
@@ -8074,6 +8334,10 @@ void GenericBinaryOpStub::GenerateCall(
__ Move(left_arg, right);
SetArgsReversed();
} else {
+ // For non-commutative operations, left and right_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite left before moving
+ // it to left_arg.
__ movq(left_arg, left);
__ Move(right_arg, right);
}
@@ -8106,8 +8370,12 @@ void GenericBinaryOpStub::GenerateCall(
__ Move(right_arg, left);
SetArgsReversed();
} else {
- __ Move(left_arg, left);
+ // For non-commutative operations, right and left_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite right before moving
+ // it to right_arg.
__ movq(right_arg, right);
+ __ Move(left_arg, left);
}
// Update flags to indicate that arguments are in registers.
SetArgsInRegisters();
@@ -8309,7 +8577,15 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::DIV: {
// rax: y
// rdx: x
- FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
+ if (NumberInfo::IsNumber(operands_type_)) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
+ __ AbortIfNotNumber(rax, "GenericBinaryOpStub operand not a number.");
+ }
+ } else {
+ FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
+ }
// Fast-case: Both operands are numbers.
// xmm4 and xmm5 are volatile XMM registers.
FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index a758e739b..345431241 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -305,19 +305,15 @@ class CodeGenerator: public AstVisitor {
// Takes a function literal, generates code for it. This function should only
// be called by compiler.cc.
- static Handle<Code> MakeCode(FunctionLiteral* fun,
- Handle<Script> script,
- bool is_eval,
- CompilationInfo* info);
+ static Handle<Code> MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(FunctionLiteral* fun);
+ static void MakeCodePrologue(CompilationInfo* info);
// Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
- MacroAssembler* masm,
+ static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
Code::Flags flags,
- Handle<Script> script);
+ CompilationInfo* info);
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
@@ -328,7 +324,7 @@ class CodeGenerator: public AstVisitor {
// Accessors
MacroAssembler* masm() { return masm_; }
VirtualFrame* frame() const { return frame_; }
- Handle<Script> script() { return script_; }
+ inline Handle<Script> script();
bool has_valid_frame() const { return frame_ != NULL; }
@@ -352,16 +348,15 @@ class CodeGenerator: public AstVisitor {
private:
// Construction/Destruction
- CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
+ explicit CodeGenerator(MacroAssembler* masm);
// Accessors
- Scope* scope() const { return scope_; }
+ inline bool is_eval();
+ Scope* scope();
// Generating deferred code.
void ProcessDeferred();
- bool is_eval() { return is_eval_; }
-
// State
ControlDestination* destination() const { return state_->destination(); }
@@ -390,7 +385,7 @@ class CodeGenerator: public AstVisitor {
void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
- void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
+ void Generate(CompilationInfo* info, Mode mode);
// Generate the return sequence code. Should be called no more than
// once per compiled function, immediately after binding the return
@@ -398,7 +393,7 @@ class CodeGenerator: public AstVisitor {
void GenerateReturnSequence(Result* return_value);
// Returns the arguments allocation mode.
- ArgumentsAllocationMode ArgumentsMode() const;
+ ArgumentsAllocationMode ArgumentsMode();
// Store the arguments object and allocate it if necessary.
Result StoreArgumentsObject(bool initial);
@@ -489,7 +484,8 @@ class CodeGenerator: public AstVisitor {
Result* right,
OverwriteMode overwrite_mode);
- void Comparison(Condition cc,
+ void Comparison(AstNode* node,
+ Condition cc,
bool strict,
ControlDestination* destination);
@@ -581,6 +577,9 @@ class CodeGenerator: public AstVisitor {
// Support for direct calls from JavaScript to native RegExp code.
void GenerateRegExpExec(ZoneList<Expression*>* args);
+ // Fast support for number to string.
+ void GenerateNumberToString(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -604,15 +603,14 @@ class CodeGenerator: public AstVisitor {
bool HasValidEntryRegisters();
#endif
- bool is_eval_; // Tells whether code is generated for eval.
- Handle<Script> script_;
ZoneList<DeferredCode*> deferred_;
// Assembler
MacroAssembler* masm_; // to generate code
+ CompilationInfo* info_;
+
// Code generation state
- Scope* scope_;
VirtualFrame* frame_;
RegisterAllocator* allocator_;
CodeGenState* state_;
@@ -660,13 +658,15 @@ class GenericBinaryOpStub: public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
- GenericBinaryFlags flags)
+ GenericBinaryFlags flags,
+ NumberInfo::Type operands_type = NumberInfo::kUnknown)
: op_(op),
mode_(mode),
flags_(flags),
args_in_registers_(false),
args_reversed_(false),
- name_(NULL) {
+ name_(NULL),
+ operands_type_(operands_type) {
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -691,28 +691,32 @@ class GenericBinaryOpStub: public CodeStub {
bool args_reversed_; // Left and right argument are swapped.
bool use_sse3_;
char* name_;
+ NumberInfo::Type operands_type_;
const char* GetName();
#ifdef DEBUG
void Print() {
- PrintF("GenericBinaryOpStub (op %s), "
- "(mode %d, flags %d, registers %d, reversed %d)\n",
+ PrintF("GenericBinaryOpStub %d (op %s), "
+ "(mode %d, flags %d, registers %d, reversed %d, only_numbers %s)\n",
+ MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
static_cast<int>(flags_),
static_cast<int>(args_in_registers_),
- static_cast<int>(args_reversed_));
+ static_cast<int>(args_reversed_),
+ NumberInfo::ToString(operands_type_));
}
#endif
- // Minor key encoding in 16 bits FRASOOOOOOOOOOMM.
+ // Minor key encoding in 16 bits NNNFRASOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 10> {};
- class SSE3Bits: public BitField<bool, 12, 1> {};
- class ArgsInRegistersBits: public BitField<bool, 13, 1> {};
- class ArgsReversedBits: public BitField<bool, 14, 1> {};
- class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class SSE3Bits: public BitField<bool, 9, 1> {};
+ class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
+ class ArgsReversedBits: public BitField<bool, 11, 1> {};
+ class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
+ class NumberInfoBits: public BitField<NumberInfo::Type, 13, 3> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
@@ -722,7 +726,8 @@ class GenericBinaryOpStub: public CodeStub {
| FlagBits::encode(flags_)
| SSE3Bits::encode(use_sse3_)
| ArgsInRegistersBits::encode(args_in_registers_)
- | ArgsReversedBits::encode(args_reversed_);
+ | ArgsReversedBits::encode(args_reversed_)
+ | NumberInfoBits::encode(operands_type_);
}
void Generate(MacroAssembler* masm);
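
The widened minor key now packs seven fields into 16 bits; read high to low, NNNFRASOOOOOOOMM is 3+1+1+1+1+7+2 = 16. A standalone sketch of the encode/decode pattern the BitField classes implement:

#include <cstdint>

// Each field owns bits [shift, shift + size) of the key.
template <typename T, int shift, int size>
struct BitField {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key >> shift) & ((1u << size) - 1));
  }
};

// The layout from the hunk above; OR-ing the encodes yields MinorKey().
using ModeBits         = BitField<int, 0, 2>;
using OpBits           = BitField<int, 2, 7>;
using SSE3Bits         = BitField<bool, 9, 1>;
using ArgsInRegsBits   = BitField<bool, 10, 1>;
using ArgsReversedBits = BitField<bool, 11, 1>;
using FlagBits         = BitField<int, 12, 1>;
using NumberInfoBits   = BitField<int, 13, 3>;  // completes the 16 bits
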
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index ce3aae8a2..547daeeb1 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -993,7 +993,60 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
byte* current = data + 2;
// At return, "current" points to the start of the next instruction.
const char* mnemonic = TwoByteMnemonic(opcode);
- if (opcode == 0x1F) {
+ if (operand_size_ == 0x66) {
+ // 0x66 0x0F prefix.
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ const char* mnemonic = "?";
+ if (opcode == 0x57) {
+ mnemonic = "xorpd";
+ } else if (opcode == 0x2E) {
+ mnemonic = "comisd";
+ } else if (opcode == 0x2F) {
+ mnemonic = "ucomisd";
+ } else {
+ UnimplementedInstruction();
+ }
+ AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else if (group_1_prefix_ == 0xF2) {
+ // Beginning of instructions with prefix 0xF2.
+
+ if (opcode == 0x11 || opcode == 0x10) {
+ // MOVSD: Move scalar double-precision fp to/from/between XMM registers.
+ AppendToBuffer("movsd ");
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ if (opcode == 0x11) {
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else {
+ AppendToBuffer("%s,", NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
+ }
+ } else if (opcode == 0x2A) {
+ // CVTSI2SD: integer to XMM double conversion.
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
+ } else if ((opcode & 0xF8) == 0x58) {
+ // XMM arithmetic. Mnemonic was retrieved at the start of this function.
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else {
+ UnimplementedInstruction();
+ }
+ } else if (opcode == 0x2C && group_1_prefix_ == 0xF3) {
+ // Instruction with prefix 0xF3.
+
+ // CVTTSS2SI: Convert scalar single-precision FP to dword integer.
+ // Assert that mod is not 3, so source is memory, not an XMM register.
+ ASSERT_NE(0xC0, *current & 0xC0);
+ current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
+ } else if (opcode == 0x1F) {
// NOP
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
@@ -1007,8 +1060,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += 4;
} // else no immediate displacement.
AppendToBuffer("nop");
-
- } else if (opcode == 0xA2 || opcode == 0x31) {
+ } else if (opcode == 0xA2 || opcode == 0x31) {
// RDTSC or CPUID
AppendToBuffer("%s", mnemonic);
@@ -1043,43 +1095,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else {
AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
}
- } else if (group_1_prefix_ == 0xF2) {
- // Beginning of instructions with prefix 0xF2.
-
- if (opcode == 0x11 || opcode == 0x10) {
- // MOVSD: Move scalar double-precision fp to/from/between XMM registers.
- AppendToBuffer("movsd ");
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x11) {
- current += PrintRightOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- AppendToBuffer("%s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- }
- } else if (opcode == 0x2A) {
- // CVTSI2SD: integer to XMM double conversion.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- } else if ((opcode & 0xF8) == 0x58) {
- // XMM arithmetic. Mnemonic was retrieved at the start of this function.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else {
- UnimplementedInstruction();
- }
- } else if (opcode == 0x2C && group_1_prefix_ == 0xF3) {
- // Instruction with prefix 0xF3.
-
- // CVTTSS2SI: Convert scalar single-precision FP to dword integer.
- // Assert that mod is not 3, so source is memory, not an XMM register.
- ASSERT_NE(0xC0, *current & 0xC0);
- current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
} else {
UnimplementedInstruction();
}
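
Reordering the branches makes the mandatory SSE prefixes (0x66 operand-size, 0xF2/0xF3 group-1) take precedence over the opcode-specific cases when decoding a two-byte (0x0F-escaped) opcode. A skeleton of the resulting dispatch, mnemonics only:

#include <cstdint>
#include <string>

// Mnemonic choices mirror the hunk above. (Per the Intel SDM,
// 66 0F 2E is actually ucomisd and 66 0F 2F comisd, so those two
// appear swapped upstream.)
std::string DecodeTwoByteMnemonic(uint8_t opcode, uint8_t operand_size,
                                  uint8_t group_1_prefix) {
  if (operand_size == 0x66) {             // 66 0F xx: double-precision forms
    if (opcode == 0x57) return "xorpd";
    if (opcode == 0x2E) return "comisd";
    if (opcode == 0x2F) return "ucomisd";
    return "?";
  }
  if (group_1_prefix == 0xF2) {           // F2 0F xx: scalar-double forms
    if (opcode == 0x10 || opcode == 0x11) return "movsd";
    if (opcode == 0x2A) return "cvtsi2sd";
    if ((opcode & 0xF8) == 0x58) return "sse2 arithmetic";  // addsd etc.
    return "?";
  }
  if (group_1_prefix == 0xF3 && opcode == 0x2C) return "cvttss2si";
  if (opcode == 0x1F) return "nop";
  return "other";  // RDTSC, CPUID, shifts, ... handled further down
}
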
diff --git a/deps/v8/src/x64/fast-codegen-x64.cc b/deps/v8/src/x64/fast-codegen-x64.cc
index 12b5653e5..1af768554 100644
--- a/deps/v8/src/x64/fast-codegen-x64.cc
+++ b/deps/v8/src/x64/fast-codegen-x64.cc
@@ -35,78 +35,152 @@ namespace internal {
#define __ ACCESS_MASM(masm())
-void FastCodeGenerator::EmitLoadReceiver(Register reg) {
+Register FastCodeGenerator::accumulator0() { return rax; }
+Register FastCodeGenerator::accumulator1() { return rdx; }
+Register FastCodeGenerator::scratch0() { return rcx; }
+Register FastCodeGenerator::scratch1() { return rdi; }
+Register FastCodeGenerator::receiver_reg() { return rbx; }
+Register FastCodeGenerator::context_reg() { return rsi; }
+
+
+void FastCodeGenerator::EmitLoadReceiver() {
// Offset 2 is due to return address and saved frame pointer.
- int index = 2 + function()->scope()->num_parameters();
- __ movq(reg, Operand(rbp, index * kPointerSize));
+ int index = 2 + scope()->num_parameters();
+ __ movq(receiver_reg(), Operand(rbp, index * kPointerSize));
}
-void FastCodeGenerator::EmitReceiverMapCheck() {
- Comment cmnt(masm(), ";; MapCheck(this)");
- if (FLAG_print_ir) {
- PrintF("MapCheck(this)\n");
- }
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
+ ASSERT(!destination().is(no_reg));
+ ASSERT(cell->IsJSGlobalPropertyCell());
- EmitLoadReceiver(rdx);
- __ JumpIfSmi(rdx, bailout());
+ __ Move(destination(), cell);
+ __ movq(destination(),
+ FieldOperand(destination(), JSGlobalPropertyCell::kValueOffset));
+ if (FLAG_debug_code) {
+ __ Cmp(destination(), Factory::the_hole_value());
+ __ Check(not_equal, "DontDelete cells can't contain the hole");
+ }
- ASSERT(has_receiver() && receiver()->IsHeapObject());
- Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
- Handle<Map> map(object->map());
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), map);
- __ j(not_equal, bailout());
+ // The loaded value is not known to be a smi.
+ clear_as_smi(destination());
}
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
- // Compile global variable accesses as load IC calls. The only live
- // registers are rsi (context) and possibly rdx (this). Both are also
- // saved in the stack and rsi is preserved by the call.
- __ push(CodeGenerator::GlobalObject());
- __ Move(rcx, name);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- if (has_this_properties()) {
- // Restore this.
- EmitLoadReceiver(rdx);
+void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+ LookupResult lookup;
+ info()->receiver()->Lookup(*name, &lookup);
+
+ ASSERT(lookup.holder() == *info()->receiver());
+ ASSERT(lookup.type() == FIELD);
+ Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
+ int index = lookup.GetFieldIndex() - map->inobject_properties();
+ int offset = index * kPointerSize;
+
+ // We will emit the write barrier unless the stored value is statically
+ // known to be a smi.
+ bool needs_write_barrier = !is_smi(accumulator0());
+
+ // Perform the store. Negative offsets are inobject properties.
+ if (offset < 0) {
+ offset += map->instance_size();
+ __ movq(FieldOperand(receiver_reg(), offset), accumulator0());
+ if (needs_write_barrier) {
+ // Preserve receiver from write barrier.
+ __ movq(scratch0(), receiver_reg());
+ }
} else {
- __ nop(); // Not test rax, indicates IC has no inlined code at call site.
+ offset += FixedArray::kHeaderSize;
+ __ movq(scratch0(),
+ FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
+ __ movq(FieldOperand(scratch0(), offset), accumulator0());
+ }
+
+ if (needs_write_barrier) {
+ if (destination().is(no_reg)) {
+      // After RecordWrite accumulator0 is only accidentally a smi, but it is
+ // already marked as not known to be one.
+ __ RecordWrite(scratch0(), offset, accumulator0(), scratch1());
+ } else {
+ // Copy the value to the other accumulator to preserve a copy from the
+ // write barrier. One of the accumulators is available as a scratch
+ // register. Neither is a smi.
+ __ movq(accumulator1(), accumulator0());
+ clear_as_smi(accumulator1());
+ Register value_scratch = other_accumulator(destination());
+ __ RecordWrite(scratch0(), offset, value_scratch, scratch1());
+ }
+ } else if (destination().is(accumulator1())) {
+ __ movq(accumulator1(), accumulator0());
+ // Is a smi because we do not need the write barrier.
+ set_as_smi(accumulator1());
}
}
-void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
+ ASSERT(!destination().is(no_reg));
LookupResult lookup;
- receiver()->Lookup(*name, &lookup);
+ info()->receiver()->Lookup(*name, &lookup);
- ASSERT(lookup.holder() == *receiver());
+ ASSERT(lookup.holder() == *info()->receiver());
ASSERT(lookup.type() == FIELD);
- Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+ Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
int index = lookup.GetFieldIndex() - map->inobject_properties();
int offset = index * kPointerSize;
- // Negative offsets are inobject properties.
+ // Perform the load. Negative offsets are inobject properties.
if (offset < 0) {
offset += map->instance_size();
- __ movq(rcx, rdx); // Copy receiver for write barrier.
+ __ movq(destination(), FieldOperand(receiver_reg(), offset));
} else {
offset += FixedArray::kHeaderSize;
- __ movq(rcx, FieldOperand(rdx, JSObject::kPropertiesOffset));
+ __ movq(scratch0(),
+ FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
+ __ movq(destination(), FieldOperand(scratch0(), offset));
+ }
+
+ // The loaded value is not known to be a smi.
+ clear_as_smi(destination());
+}
+
+
+void FastCodeGenerator::EmitBitOr() {
+ if (is_smi(accumulator0()) && is_smi(accumulator1())) {
+ // If both operands are known to be a smi then there is no need to check
+ // the operands or result.
+ if (destination().is(no_reg)) {
+ __ or_(accumulator1(), accumulator0());
+ } else {
+ // Leave the result in the destination register. Bitwise or is
+ // commutative.
+ __ or_(destination(), other_accumulator(destination()));
+ }
+ } else if (destination().is(no_reg)) {
+ // Result is not needed but do not clobber the operands in case of
+ // bailout.
+ __ movq(scratch0(), accumulator1());
+ __ or_(scratch0(), accumulator0());
+ __ JumpIfNotSmi(scratch0(), bailout());
+ } else {
+ // Preserve the destination operand in a scratch register in case of
+ // bailout.
+ __ movq(scratch0(), destination());
+ __ or_(destination(), other_accumulator(destination()));
+ __ JumpIfNotSmi(destination(), bailout());
}
- // Perform the store.
- __ movq(FieldOperand(rcx, offset), rax);
- // Preserve value from write barrier in case it's needed.
- __ movq(rbx, rax);
- __ RecordWrite(rcx, offset, rbx, rdi);
+
+
+  // If we didn't bail out, the result (in fact, both inputs too) is known to
+ // be a smi.
+ set_as_smi(accumulator0());
+ set_as_smi(accumulator1());
}
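
EmitBitOr leans on the pointer-tagging scheme: assuming smis carry a 0 tag bit and heap objects a 1, the OR of two tagged words has its tag bit set iff at least one operand was a heap object, so a single JumpIfNotSmi on the result validates both operands at once. A sketch of that invariant:

#include <cstdint>

constexpr uintptr_t kTagMask = 1;  // assumed: low bit is the tag

bool IsSmi(uintptr_t tagged) { return (tagged & kTagMask) == 0; }

// tag(a | b) == tag(a) | tag(b): the OR result is a valid smi exactly
// when both inputs were smis, so one check covers both operands.
bool OrStaysSmi(uintptr_t a, uintptr_t b) { return IsSmi(a | b); }
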
-void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
- ASSERT(function_ == NULL);
+void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
ASSERT(info_ == NULL);
- function_ = fun;
- info_ = info;
+ info_ = compilation_info;
// Save the caller's frame pointer and set up our own.
Comment prologue_cmnt(masm(), ";; Prologue");
@@ -117,18 +191,42 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
   // Note that we keep a live register reference to rsi (context) at this
// point.
- // Receiver (this) is allocated to rdx if there are this properties.
- if (has_this_properties()) EmitReceiverMapCheck();
+ // Receiver (this) is allocated to a fixed register.
+ if (info()->has_this_properties()) {
+ Comment cmnt(masm(), ";; MapCheck(this)");
+ if (FLAG_print_ir) {
+ PrintF("MapCheck(this)\n");
+ }
+ ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
+ Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
+ Handle<Map> map(object->map());
+ EmitLoadReceiver();
+ __ CheckMap(receiver_reg(), map, bailout(), false);
+ }
+
+ // If there is a global variable access check if the global object is the
+ // same as at lazy-compilation time.
+ if (info()->has_globals()) {
+ Comment cmnt(masm(), ";; MapCheck(GLOBAL)");
+ if (FLAG_print_ir) {
+ PrintF("MapCheck(GLOBAL)\n");
+ }
+ ASSERT(info()->has_global_object());
+ Handle<Map> map(info()->global_object()->map());
+ __ movq(scratch0(), CodeGenerator::GlobalObject());
+ __ CheckMap(scratch0(), map, bailout(), true);
+ }
- VisitStatements(fun->body());
+ VisitStatements(info()->function()->body());
Comment return_cmnt(masm(), ";; Return(<undefined>)");
+ if (FLAG_print_ir) {
+ PrintF("Return(<undefined>)\n");
+ }
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-
- Comment epilogue_cmnt(masm(), ";; Epilogue");
__ movq(rsp, rbp);
__ pop(rbp);
- __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+ __ ret((scope()->num_parameters() + 1) * kPointerSize);
__ bind(&bailout_);
}
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index f5bbfafe6..30db660de 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -51,9 +51,10 @@ namespace internal {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-x64.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
- function_ = fun;
- SetFunctionPosition(fun);
+void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+ ASSERT(info_ == NULL);
+ info_ = info;
+ SetFunctionPosition(function());
if (mode == PRIMARY) {
__ push(rbp); // Caller's frame pointer.
@@ -62,7 +63,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
__ push(rdi); // Callee's JS Function.
{ Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = fun->scope()->num_stack_slots();
+ int locals_count = scope()->num_stack_slots();
if (locals_count == 1) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
} else if (locals_count > 1) {
@@ -76,7 +77,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
bool function_in_register = true;
// Possibly allocate a local context.
- if (fun->scope()->num_heap_slots() > 0) {
+ if (scope()->num_heap_slots() > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
__ push(rdi);
@@ -87,9 +88,9 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
__ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
// Copy any necessary parameters into the context.
- int num_parameters = fun->scope()->num_parameters();
+ int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
- Slot* slot = fun->scope()->parameter(i)->slot();
+ Slot* slot = scope()->parameter(i)->slot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -108,7 +109,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
}
// Possibly allocate an arguments object.
- Variable* arguments = fun->scope()->arguments()->AsVariable();
+ Variable* arguments = scope()->arguments()->AsVariable();
if (arguments != NULL) {
// Arguments object must be allocated after the context object, in
// case the "arguments" or ".arguments" variables are in the context.
@@ -119,10 +120,11 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
__ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
// The receiver is just before the parameters on the caller's stack.
- __ lea(rdx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
- fun->num_parameters() * kPointerSize));
+ int offset = scope()->num_parameters() * kPointerSize;
+ __ lea(rdx,
+ Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(rdx);
- __ Push(Smi::FromInt(fun->num_parameters()));
+ __ Push(Smi::FromInt(scope()->num_parameters()));
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
@@ -133,13 +135,13 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
__ movq(rcx, rax);
Move(arguments->slot(), rax, rbx, rdx);
Slot* dot_arguments_slot =
- fun->scope()->arguments_shadow()->AsVariable()->slot();
+ scope()->arguments_shadow()->AsVariable()->slot();
Move(dot_arguments_slot, rcx, rbx, rdx);
}
}
{ Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(fun->scope()->declarations());
+ VisitDeclarations(scope()->declarations());
}
{ Comment cmnt(masm_, "[ Stack check");
@@ -157,14 +159,14 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
{ Comment cmnt(masm_, "[ Body");
ASSERT(loop_depth() == 0);
- VisitStatements(fun->body());
+ VisitStatements(function()->body());
ASSERT(loop_depth() == 0);
}
{ Comment cmnt(masm_, "[ return <undefined>;");
// Emit a 'return undefined' in case control fell off the end of the body.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- EmitReturnSequence(function_->end_position());
+ EmitReturnSequence(function()->end_position());
}
}
@@ -190,7 +192,7 @@ void FullCodeGenerator::EmitReturnSequence(int position) {
// patch with the code required by the debugger.
__ movq(rsp, rbp);
__ pop(rbp);
- __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+ __ ret((scope()->num_parameters() + 1) * kPointerSize);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Add padding that will be overwritten by a debugger breakpoint. We
// have just generated "movq rsp, rbp; pop rbp; ret k" with length 7
@@ -629,7 +631,7 @@ MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
return Operand(rbp, SlotOffset(slot));
case Slot::CONTEXT: {
int context_chain_length =
- function_->scope()->ContextChainLength(slot->var()->scope());
+ scope()->ContextChainLength(slot->var()->scope());
__ LoadContext(scratch, context_chain_length);
return CodeGenerator::ContextOperand(scratch, slot->index());
}
@@ -688,7 +690,7 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
// this specific context.
// The variable in the decl always resides in the current context.
- ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+ ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
if (FLAG_debug_code) {
// Check if we have the correct context pointer.
__ movq(rbx,
@@ -767,7 +769,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(rsi); // The context is the first argument.
__ Push(pairs);
- __ Push(Smi::FromInt(is_eval_ ? 1 : 0));
+ __ Push(Smi::FromInt(is_eval() ? 1 : 0));
__ CallRuntime(Runtime::kDeclareGlobals, 3);
// Return value is ignored.
}
@@ -778,7 +780,7 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
// Build the function boilerplate and instantiate it.
Handle<JSFunction> boilerplate =
- Compiler::BuildBoilerplate(expr, script_, this);
+ Compiler::BuildBoilerplate(expr, script(), this);
if (HasStackOverflow()) return;
ASSERT(boilerplate->IsBoilerplate());
@@ -1013,6 +1015,92 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ Comment cmnt(masm_, "[ Assignment");
+ ASSERT(expr->op() != Token::INIT_CONST);
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->target()->AsProperty();
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ if (expr->is_compound()) {
+ // We need the receiver both on the stack and in the accumulator.
+ VisitForValue(prop->obj(), kAccumulator);
+ __ push(result_register());
+ } else {
+ VisitForValue(prop->obj(), kStack);
+ }
+ break;
+ case KEYED_PROPERTY:
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+ break;
+ }
+
+ // If we have a compound assignment: Get value of LHS expression and
+  // store it on top of the stack.
+ if (expr->is_compound()) {
+ Location saved_location = location_;
+ location_ = kStack;
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
+ Expression::kValue);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(prop);
+ __ push(result_register());
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(prop);
+ __ push(result_register());
+ break;
+ }
+ location_ = saved_location;
+ }
+
+ // Evaluate RHS expression.
+ Expression* rhs = expr->value();
+ VisitForValue(rhs, kAccumulator);
+
+ // If we have a compound assignment: Apply operator.
+ if (expr->is_compound()) {
+ Location saved_location = location_;
+ location_ = kAccumulator;
+ EmitBinaryOp(expr->binary_op(), Expression::kValue);
+ location_ = saved_location;
+ }
+
+ // Record source position before possible IC call.
+ SetSourcePosition(expr->position());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ context_);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
+}
+
+
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
@@ -1198,7 +1286,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> ignored,
+ Handle<Object> name,
RelocInfo::Mode mode) {
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
@@ -1206,6 +1294,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
for (int i = 0; i < arg_count; i++) {
VisitForValue(args->at(i), kStack);
}
+ __ Move(rcx, name);
// Record source position for debugger.
SetSourcePosition(expr->position());
// Call the IC initialization code.
@@ -1215,8 +1304,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
__ Call(ic, mode);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- DropAndApply(1, context_, rax);
+ Apply(context_, rax);
}
@@ -1248,7 +1336,6 @@ void FullCodeGenerator::VisitCall(Call* expr) {
UNREACHABLE();
} else if (var != NULL && !var->is_this() && var->is_global()) {
// Call to a global variable.
- __ Push(var->name());
// Push global object as receiver for the call IC lookup.
__ push(CodeGenerator::GlobalObject());
EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
@@ -1262,7 +1349,6 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Literal* key = prop->key()->AsLiteral();
if (key != NULL && key->handle()->IsSymbol()) {
// Call to a named property, use call IC.
- __ Push(key->handle());
VisitForValue(prop->obj(), kStack);
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
@@ -1353,7 +1439,6 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
// Prepare for calling JS runtime function.
- __ Push(expr->name());
__ movq(rax, CodeGenerator::GlobalObject());
__ push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
}
@@ -1365,18 +1450,17 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
if (expr->is_jsruntime()) {
- // Call the JS runtime function.
- Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
- NOT_IN_LOOP);
+ // Call the JS runtime function using a call IC.
+ __ Move(rcx, expr->name());
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
__ call(ic, RelocInfo::CODE_TARGET);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- DropAndApply(1, context_, rax);
} else {
__ CallRuntime(expr->function(), arg_count);
- Apply(context_, rax);
}
+ Apply(context_, rax);
}
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 28bfd2ee6..0e93637f3 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -228,23 +228,37 @@ void KeyedStoreIC::RestoreInlinedVersion(Address address) {
}
-void KeyedLoadIC::Generate(MacroAssembler* masm,
- ExternalReference const& f) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ pop(rbx);
- __ push(rcx); // receiver
- __ push(rax); // name
+ __ push(Operand(rsp, 1 * kPointerSize)); // receiver
+ __ push(Operand(rsp, 1 * kPointerSize)); // name
__ push(rbx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(f, 2, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedLoadIC_Miss)), 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+
+ __ pop(rbx);
+ __ push(Operand(rsp, 1 * kPointerSize)); // receiver
+ __ push(Operand(rsp, 1 * kPointerSize)); // name
+ __ push(rbx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(ExternalReference(Runtime::kKeyedGetProperty), 2, 1);
}
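
The two pushes above look identical but copy different slots: rsp moves down
one word after each push, so Operand(rsp, 1 * kPointerSize) denotes the
receiver the first time and the name the second time. A worked trace of the
stack (top first), starting from the state comment above:

    // start:                    ret, name, receiver
    __ pop(rbx);              // name, receiver                 (rbx = ret)
    __ push(Operand(rsp, 1 * kPointerSize));
                              // receiver, name, receiver
    __ push(Operand(rsp, 1 * kPointerSize));
                              // name, receiver, name, receiver
    __ push(rbx);             // ret, name, receiver, name, receiver

The two runtime arguments end up duplicated under the re-pushed return
address, which is exactly what TailCallRuntime(..., 2, 1) expects.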
@@ -317,7 +331,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
- Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+ GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
// The key is not a smi.
// Is it a string?
@@ -555,21 +569,54 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
- Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+ GenerateRuntimeGetProperty(masm);
}
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
- // -- rsp[8] : name
+ // -- rsp[8] : key
// -- rsp[16] : receiver
// -----------------------------------
- Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+ Label slow;
+
+ // Load key and receiver.
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(rcx, &slow);
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(rax, &slow);
+
+ // Get the map of the receiver.
+ __ movq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
+
+ // Check that it has indexed interceptor and access checks
+ // are not enabled for this object.
+ __ movb(rdx, FieldOperand(rdx, Map::kBitFieldOffset));
+ __ andb(rdx, Immediate(kSlowCaseBitFieldMask));
+ __ cmpb(rdx, Immediate(1 << Map::kHasIndexedInterceptor));
+ __ j(not_zero, &slow);
+
+ // Everything is fine, call runtime.
+ __ pop(rdx);
+ __ push(rcx); // receiver
+ __ push(rax); // key
+ __ push(rdx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(ExternalReference(
+ IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
+
+ __ bind(&slow);
+ GenerateMiss(masm);
}
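
The andb/cmpb pair above folds two predicates into one comparison: after
masking the map's bit field with kSlowCaseBitFieldMask, the result equals
1 << Map::kHasIndexedInterceptor exactly when the interceptor bit is set and
the access-check bit is clear. A standalone model of the trick (the bit
positions here are illustrative assumptions, not taken from this patch):

    #include <cassert>
    #include <cstdint>

    const int kHasIndexedInterceptor = 3;  // assumed bit position
    const int kIsAccessCheckNeeded = 1;    // assumed bit position
    const uint8_t kSlowCaseBitFieldMask =
        (1 << kIsAccessCheckNeeded) | (1 << kHasIndexedInterceptor);

    // True iff the object has an indexed interceptor and access checks are
    // not enabled -- both conditions decided by a single and/cmp.
    bool TakesFastPath(uint8_t bit_field) {
      return (bit_field & kSlowCaseBitFieldMask) ==
             (1 << kHasIndexedInterceptor);
    }

    int main() {
      assert(TakesFastPath(1 << kHasIndexedInterceptor));
      assert(!TakesFastPath((1 << kHasIndexedInterceptor) |
                            (1 << kIsAccessCheckNeeded)));
      assert(!TakesFastPath(0));
      return 0;
    }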
-void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rsp[0] : return address
@@ -584,28 +631,26 @@ void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
__ push(rcx); // return address
// Do tail-call to runtime routine.
- __ TailCallRuntime(f, 3, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedStoreIC_Miss)), 3, 1);
}
-void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
- // -- rcx : transition map
// -- rsp[0] : return address
// -- rsp[8] : key
// -- rsp[16] : receiver
// -----------------------------------
- __ pop(rbx);
+ __ pop(rcx);
__ push(Operand(rsp, 1 * kPointerSize)); // receiver
- __ push(rcx); // transition map
+ __ push(Operand(rsp, 1 * kPointerSize)); // key
__ push(rax); // value
- __ push(rbx); // return address
+ __ push(rcx); // return address
// Do tail-call to runtime routine.
- __ TailCallRuntime(
- ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+ __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
}
@@ -659,7 +704,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Slow case: call runtime.
__ bind(&slow);
- Generate(masm, ExternalReference(Runtime::kSetProperty));
+ GenerateRuntimeSetProperty(masm);
// Check whether the elements is a pixel array.
// rax: value
@@ -923,23 +968,29 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// Slow case: call runtime.
__ bind(&slow);
- Generate(masm, ExternalReference(Runtime::kSetProperty));
+ GenerateRuntimeSetProperty(masm);
}
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
+ // ...
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // -----------------------------------
// Get the receiver of the function from the stack; 1 ~ return address.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- // Get the name of the function to call from the stack.
- // 2 ~ receiver, return address.
- __ movq(rbx, Operand(rsp, (argc + 2) * kPointerSize));
// Enter an internal frame.
__ EnterInternalFrame();
// Push the receiver and the name of the function.
__ push(rdx);
- __ push(rbx);
+ __ push(rcx);
// Call the entry.
CEntryStub stub(1);
@@ -977,20 +1028,18 @@ Object* CallIC_Miss(Arguments args);
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
- // rsp[0] return address
- // rsp[8] argument argc
- // rsp[16] argument argc - 1
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] argument 1
- // rsp[(argc + 1) * 8] argument 0 = receiver
- // rsp[(argc + 2) * 8] function name
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
Label number, non_number, non_string, boolean, probe, miss;
// Get the receiver of the function from the stack; 1 ~ return address.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- // Get the name of the function from the stack; 2 ~ return address, receiver
- __ movq(rcx, Operand(rsp, (argc + 2) * kPointerSize));
// Probe the stub cache.
Code::Flags flags =
@@ -1043,6 +1092,16 @@ static void GenerateNormalHelper(MacroAssembler* masm,
int argc,
bool is_global_object,
Label* miss) {
+ // ----------- S t a t e -------------
+ // rcx : function name
+ // rdx : receiver
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
+ // ...
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // -----------------------------------
// Search dictionary - put result in register rdx.
GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx, CHECK_DICTIONARY);
@@ -1073,20 +1132,18 @@ static void GenerateNormalHelper(MacroAssembler* masm,
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
- // rsp[0] return address
- // rsp[8] argument argc
- // rsp[16] argument argc - 1
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] argument 1
- // rsp[(argc + 1) * 8] argument 0 = receiver
- // rsp[(argc + 2) * 8] function name
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
Label miss, global_object, non_global_object;
// Get the receiver of the function from the stack.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- // Get the name of the function from the stack.
- __ movq(rcx, Operand(rsp, (argc + 2) * kPointerSize));
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
@@ -1153,22 +1210,20 @@ void LoadIC::ClearInlinedVersion(Address address) {
}
-void LoadIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rcx : name
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
- __ movq(rax, Operand(rsp, kPointerSize));
-
__ pop(rbx);
- __ push(rax); // receiver
+ __ push(Operand(rsp, 0)); // receiver
__ push(rcx); // name
__ push(rbx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(f, 2, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kLoadIC_Miss)), 2, 1);
}
@@ -1224,17 +1279,6 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
}
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rcx : name
- // -- rsp[0] : return address
- // -- rsp[8] : receiver
- // -----------------------------------
-
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
-}
-
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rcx : name
@@ -1278,7 +1322,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Cache miss: Restore receiver from stack and jump to runtime.
__ bind(&miss);
__ movq(rax, Operand(rsp, 1 * kPointerSize));
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+ GenerateMiss(masm);
}
@@ -1292,13 +1336,12 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
__ movq(rax, Operand(rsp, kPointerSize));
- StubCompiler::GenerateLoadStringLength(masm, rax, rdx, &miss);
+ StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
-
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// The address of the instruction following the call.
Address test_instruction_address =
@@ -1326,6 +1369,7 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
return true;
}
+
void StoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
@@ -1344,40 +1388,71 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
}
-void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
- // -- rcx : Map (target of map transition)
+ // -- rcx : name
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // transition map
- __ push(rax); // value
- __ push(rbx); // return address
+ // Get the receiver from the stack and probe the stub cache.
+ Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+ NOT_IN_LOOP,
+ MONOMORPHIC);
+ StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
- // Perform tail call to the entry.
- __ TailCallRuntime(
- ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+
+void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
+ //
+ // This accepts as a receiver anything JSObject::SetElementsLength accepts
+ // (currently anything except for external and pixel arrays, i.e. anything
+ // with elements of FixedArray type), but is currently restricted to
+ // JSArray.
+ // The value must be a number; only smis are accepted as the most common case.
- // Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
- NOT_IN_LOOP,
- MONOMORPHIC);
- StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
+ Label miss;
+
+ Register receiver = rdx;
+ Register value = rax;
+ Register scratch = rbx;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+ __ j(not_equal, &miss);
+
+ // Check that elements are FixedArray.
+ __ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
+ __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
+ __ j(not_equal, &miss);
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ pop(scratch);
+ __ push(receiver);
+ __ push(value);
+ __ push(scratch); // return address
+
+ __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_ArrayLength)), 2, 1);
+
+ __ bind(&miss);
- // Cache miss: Jump to runtime.
GenerateMiss(masm);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 96b45e842..90a9c75d9 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -39,7 +39,6 @@ namespace internal {
MacroAssembler::MacroAssembler(void* buffer, int size)
: Assembler(buffer, size),
- unresolved_(0),
generating_stub_(false),
allow_stub_calls_(true),
code_object_(Heap::undefined_value()) {
@@ -387,6 +386,16 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
}
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+ int num_arguments) {
+ movq(rax, Immediate(num_arguments));
+ movq(rbx, ext);
+
+ CEntryStub stub(1);
+ CallStub(&stub);
+}
+
+
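
The new helper fixes the CEntryStub calling convention in one place: the
argument count goes in rax, the C function address in rbx, and the stub does
the rest. The same patch uses it further down, in
CallInterceptorCompiler::CompileRegular:

    __ CallExternalReference(
        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
        5);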
void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
int num_arguments,
int result_size) {
@@ -415,38 +424,30 @@ void MacroAssembler::JumpToRuntime(const ExternalReference& ext,
}
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- bool resolved;
- Handle<Code> code = ResolveBuiltin(id, &resolved);
-
- const char* name = Builtins::GetName(id);
- int argc = Builtins::GetArgumentsCount(id);
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
+ // Calls are not allowed in some stubs.
+ ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
- movq(target, code, RelocInfo::EMBEDDED_OBJECT);
- if (!resolved) {
- uint32_t flags =
- Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsUseCodeObject::encode(true);
- Unresolved entry = { pc_offset() - sizeof(intptr_t), flags, name };
- unresolved_.Add(entry);
- }
- addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ // Rely on the assertion to check that the number of provided
+ // arguments matches the expected number of arguments. Fake a
+ // parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ GetBuiltinEntry(rdx, id);
+ InvokeCode(rdx, expected, expected, flag);
}
-Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
- bool* resolved) {
- // Move the builtin function into the temporary function slot by
- // reading it from the builtins object. NOTE: We should be able to
- // reduce this to two instructions by putting the function table in
- // the global object instead of the "builtins" object and by using a
- // real register for the function.
- movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- movq(rdx, FieldOperand(rdx, GlobalObject::kBuiltinsOffset));
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+ // Load the JavaScript builtin function from the builtins object.
+ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ movq(rdi, FieldOperand(rdi, GlobalObject::kBuiltinsOffset));
int builtins_offset =
JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
- movq(rdi, FieldOperand(rdx, builtins_offset));
-
- return Builtins::GetCode(id, resolved);
+ movq(rdi, FieldOperand(rdi, builtins_offset));
+ // Load the code entry point from the function into the target register.
+ movq(target, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ movq(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
+ addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
}
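
The rewritten GetBuiltinEntry resolves everything at run time, which is what
lets the Unresolved fixup machinery disappear later in this file. The load
chain, spelled out:

    rsi (context)
      -> global object       Context::SlotOffset(Context::GLOBAL_INDEX)
      -> builtins object     GlobalObject::kBuiltinsOffset
      -> JSFunction for id   JSBuiltinsObject::kJSBuiltinsOffset + id * kPointerSize
      -> SharedFunctionInfo  JSFunction::kSharedFunctionInfoOffset
      -> Code object         SharedFunctionInfo::kCodeOffset
      -> entry address       + Code::kHeaderSize - kHeapObjectTag

The builtin JSFunction itself is left in rdi along the way, the register
conventionally holding the callee, which InvokeBuiltin relies on when it
invokes through rdx.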
@@ -1585,6 +1586,29 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
}
+void MacroAssembler::CheckMap(Register obj,
+ Handle<Map> map,
+ Label* fail,
+ bool is_heap_object) {
+ if (!is_heap_object) {
+ JumpIfSmi(obj, fail);
+ }
+ Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+ j(not_equal, fail);
+}
+
+
+void MacroAssembler::AbortIfNotNumber(Register object, const char* msg) {
+ Label ok;
+ Condition is_smi = CheckSmi(object);
+ j(is_smi, &ok);
+ Cmp(FieldOperand(object, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ Assert(equal, msg);
+ bind(&ok);
+}
+
+
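Both helpers encode a common shape: an optional smi check, then a map compare
against a handle. A hypothetical call site (the flag and message are
illustrative, not from this patch):

    Label miss;
    // false: object may be a smi, so the smi check is emitted.
    __ CheckMap(rdx, Factory::heap_number_map(), &miss, false);
    ...
    if (FLAG_debug_code) {
      __ AbortIfNotNumber(rax, "Operand is expected to be a number.");
    }
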
Condition MacroAssembler::IsObjectStringType(Register heap_object,
Register map,
Register instance_type) {
@@ -1762,39 +1786,14 @@ void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
}
}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
- bool resolved;
- Handle<Code> code = ResolveBuiltin(id, &resolved);
-
- // Calls are not allowed in some stubs.
- ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
-
- // Rely on the assertion to check that the number of provided
- // arguments match the expected number of arguments. Fake a
- // parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- InvokeCode(Handle<Code>(code),
- expected,
- expected,
- RelocInfo::CODE_TARGET,
- flag);
-
- const char* name = Builtins::GetName(id);
- int argc = Builtins::GetArgumentsCount(id);
- // The target address for the jump is stored as an immediate at offset
- // kInvokeCodeAddressOffset.
- if (!resolved) {
- uint32_t flags =
- Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsUseCodeObject::encode(false);
- Unresolved entry =
- { pc_offset() - kCallTargetAddressOffset, flags, name };
- unresolved_.Add(entry);
- }
+void MacroAssembler::DebugBreak() {
+ ASSERT(allow_stub_calls());
+ xor_(rax, rax); // no arguments
+ movq(rbx, ExternalReference(Runtime::kDebugBreak));
+ CEntryStub ces(1);
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
+#endif // ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
@@ -1914,6 +1913,21 @@ void MacroAssembler::InvokeFunction(Register function,
}
+void MacroAssembler::InvokeFunction(JSFunction* function,
+ const ParameterCount& actual,
+ InvokeFlag flag) {
+ ASSERT(function->is_compiled());
+ // Get the function and setup the context.
+ Move(rdi, Handle<JSFunction>(function));
+ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ // Invoke the cached code.
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(rbp);
movq(rbp, rsp);
@@ -1953,13 +1967,9 @@ void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, int result_size) {
// Reserve room for entry stack pointer and push the debug marker.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
- push(Immediate(0)); // saved entry sp, patched before call
- if (mode == ExitFrame::MODE_DEBUG) {
- push(Immediate(0));
- } else {
- movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister);
- }
+ push(Immediate(0)); // Saved entry sp, patched before call.
+ movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+ push(kScratchRegister); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 2913274db..6deeddce7 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -98,6 +98,7 @@ class MacroAssembler: public Assembler {
void CopyRegistersFromStackToMemory(Register base,
Register scratch,
RegList regs);
+ void DebugBreak();
#endif
// ---------------------------------------------------------------------------
@@ -148,6 +149,10 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual,
InvokeFlag flag);
+ void InvokeFunction(JSFunction* function,
+ const ParameterCount& actual,
+ InvokeFlag flag);
+
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
@@ -460,6 +465,14 @@ class MacroAssembler: public Assembler {
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
+ // Check if the map of an object is equal to a specified map and
+ // branch to label if not. Skip the smi check if not required
+ // (object is known to be a heap object).
+ void CheckMap(Register obj,
+ Handle<Map> map,
+ Label* fail,
+ bool is_heap_object);
+
// Check if the object in register heap_object is a string. Afterwards the
// register map contains the object map and the register instance_type
// contains the instance_type. The registers map and instance_type can be the
@@ -473,6 +486,9 @@ class MacroAssembler: public Assembler {
// jcc instructions (je, ja, jae, jb, jbe, je, and jz).
void FCmp();
+ // Abort execution if argument is not a number. Used in debug code.
+ void AbortIfNotNumber(Register object, const char* msg);
+
// ---------------------------------------------------------------------------
// Exception handling
@@ -635,6 +651,10 @@ class MacroAssembler: public Assembler {
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId id, int num_arguments);
+ // Convenience function: call an external reference.
+ void CallExternalReference(const ExternalReference& ext,
+ int num_arguments);
+
// Tail call of a runtime routine (jump).
// Like JumpToRuntime, but also takes care of passing the number
// of arguments.
@@ -671,13 +691,6 @@ class MacroAssembler: public Assembler {
void Ret();
- struct Unresolved {
- int pc;
- uint32_t flags; // see Bootstrapper::FixupFlags decoders/encoders.
- const char* name;
- };
- List<Unresolved>* unresolved() { return &unresolved_; }
-
Handle<Object> CodeObject() { return code_object_; }
@@ -709,11 +722,10 @@ class MacroAssembler: public Assembler {
bool allow_stub_calls() { return allow_stub_calls_; }
private:
- List<Unresolved> unresolved_;
bool generating_stub_;
bool allow_stub_calls_;
- Handle<Object> code_object_; // This handle will be patched with the code
- // object on installation.
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
@@ -723,18 +735,6 @@ class MacroAssembler: public Assembler {
Label* done,
InvokeFlag flag);
- // Prepares for a call or jump to a builtin by doing two things:
- // 1. Emits code that fetches the builtin's function object from the context
- // at runtime, and puts it in the register rdi.
- // 2. Fetches the builtin's code object, and returns it in a handle, at
- // compile time, so that later code can emit instructions to jump or call
- // the builtin directly. If the code object has not yet been created, it
- // returns the builtin code object for IllegalFunction, and sets the
- // output parameter "resolved" to false. Code that uses the return value
- // should then add the address and the builtin name to the list of fixups
- // called unresolved_, which is fixed up by the bootstrapper.
- Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
-
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 693447b5c..9c8b4f75a 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -133,11 +133,10 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
}
-template <typename Pushable>
static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
- Pushable name,
+ Register name,
JSObject* holder_obj) {
__ push(receiver);
__ push(holder);
@@ -201,8 +200,9 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
}
+// Both name_reg and receiver_reg are preserved on jumps to miss_label,
+// but may be destroyed if the store is successful.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Builtins::Name storage_extend,
JSObject* object,
int index,
Map* transition,
@@ -231,9 +231,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
- __ Move(rcx, Handle<Map>(transition));
- Handle<Code> ic(Builtins::builtin(storage_extend));
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ __ pop(scratch); // Return address.
+ __ push(receiver_reg);
+ __ Push(Handle<Map>(transition));
+ __ push(rax);
+ __ push(scratch);
+ __ TailCallRuntime(
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)), 3, 1);
return;
}
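
The inlined replacement for the ExtendStorage builtin hands the runtime its
three arguments by rebuilding the stack under the return address. Layout on
entry to kSharedStoreIC_ExtendStorage (top first):

    rsp[0]  : return address   (re-pushed from scratch)
    rsp[8]  : value            (rax)
    rsp[16] : transition map
    rsp[24] : receiver

which matches the argument count of three in TailCallRuntime(..., 3, 1).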
@@ -314,38 +318,39 @@ static void GenerateStringCheck(MacroAssembler* masm,
void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
Label* miss) {
- Label load_length, check_wrapper;
+ Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch register.
- GenerateStringCheck(masm, receiver, scratch, miss, &check_wrapper);
+ GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
// Load length directly from the string.
- __ bind(&load_length);
__ movl(rax, FieldOperand(receiver, String::kLengthOffset));
__ Integer32ToSmi(rax, rax);
__ ret(0);
// Check if the object is a JSValue wrapper.
__ bind(&check_wrapper);
- __ cmpl(scratch, Immediate(JS_VALUE_TYPE));
+ __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
__ j(not_equal, miss);
// Check if the wrapped value is a string and load the length
// directly if it is.
- __ movq(receiver, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, receiver, scratch, miss, miss);
- __ jmp(&load_length);
+ __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+ __ movl(rax, FieldOperand(scratch2, String::kLengthOffset));
+ __ Integer32ToSmi(rax, rax);
+ __ ret(0);
}
-template <class Pushable>
static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
- Pushable name,
+ Register name,
JSObject* holder_obj) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
@@ -394,7 +399,7 @@ static void CompileLoadInterceptor(Compiler* compiler,
stub_compiler->CheckPrototypes(object, receiver, holder,
scratch1, scratch2, name, miss);
- if (lookup->IsValid() && lookup->IsCacheable()) {
+ if (lookup->IsProperty() && lookup->IsCacheable()) {
compiler->CompileCacheable(masm,
stub_compiler,
receiver,
@@ -430,7 +435,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
LookupResult* lookup,
String* name,
Label* miss_label) {
- AccessorInfo* callback = 0;
+ AccessorInfo* callback = NULL;
bool optimize = false;
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
@@ -553,8 +558,8 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
class CallInterceptorCompiler BASE_EMBEDDED {
public:
- explicit CallInterceptorCompiler(const ParameterCount& arguments)
- : arguments_(arguments), argc_(arguments.immediate()) {}
+ CallInterceptorCompiler(const ParameterCount& arguments, Register name)
+ : arguments_(arguments), name_(name) {}
void CompileCacheable(MacroAssembler* masm,
StubCompiler* stub_compiler,
@@ -584,18 +589,20 @@ class CallInterceptorCompiler BASE_EMBEDDED {
return;
}
+ ASSERT(!lookup->holder()->IsGlobalObject());
+
__ EnterInternalFrame();
- __ push(holder); // save the holder
+ __ push(holder); // Save the holder.
+ __ push(name_); // Save the name.
- CompileCallLoadPropertyWithInterceptor(
- masm,
- receiver,
- holder,
- // Under EnterInternalFrame this refers to name.
- Operand(rbp, (argc_ + 3) * kPointerSize),
- holder_obj);
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
- __ pop(receiver); // restore holder
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
__ LeaveInternalFrame();
__ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
@@ -607,22 +614,8 @@ class CallInterceptorCompiler BASE_EMBEDDED {
scratch2,
name,
miss_label);
- if (lookup->holder()->IsGlobalObject()) {
- __ movq(rdx, Operand(rsp, (argc_ + 1) * kPointerSize));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdx);
- }
- ASSERT(function->is_compiled());
- // Get the function and setup the context.
- __ Move(rdi, Handle<JSFunction>(function));
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- __ InvokeCode(code, expected, arguments_,
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+ __ InvokeFunction(function, arguments_, JUMP_FUNCTION);
__ bind(&invoke);
}
@@ -634,27 +627,26 @@ class CallInterceptorCompiler BASE_EMBEDDED {
JSObject* holder_obj,
Label* miss_label) {
__ EnterInternalFrame();
+ // Save the name_ register across the call.
+ __ push(name_);
PushInterceptorArguments(masm,
receiver,
holder,
- Operand(rbp, (argc_ + 3) * kPointerSize),
+ name_,
holder_obj);
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
- __ movq(rax, Immediate(5));
- __ movq(rbx, ref);
-
- CEntryStub stub(1);
- __ CallStub(&stub);
+ __ CallExternalReference(
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
+ 5);
+ __ pop(name_);
__ LeaveInternalFrame();
}
private:
const ParameterCount& arguments_;
- int argc_;
+ Register name_;
};
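
The recurring pattern in this class -- push name_ inside an internal frame,
make the potentially clobbering call, pop name_ back -- is the register
analogue of callee-save. In isolation:

    __ EnterInternalFrame();
    __ push(name_);                      // save across the call
    ... call that may clobber name_ ...
    __ pop(name_);                       // restore
    __ LeaveInternalFrame();

Keeping the name in a register, instead of re-reading it from a known stack
slot as the removed Operand(rbp, (argc_ + 3) * kPointerSize) code did, is
what frees the compiler from the old stack-based name slot.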
@@ -669,14 +661,14 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
String* name,
StubCompiler::CheckType check) {
// ----------- S t a t e -------------
- // -----------------------------------
- // rsp[0] return address
- // rsp[8] argument argc
- // rsp[16] argument argc - 1
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] argument 1
- // rsp[(argc + 1) * 8] argument 0 = receiver
- // rsp[(argc + 2) * 8] function name
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // -----------------------------------
Label miss;
@@ -697,7 +689,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case RECEIVER_MAP_CHECK:
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rcx, name, &miss);
+ rbx, rax, name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -713,13 +705,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ jmp(&miss);
} else {
// Check that the object is a two-byte string or a symbol.
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rcx);
+ __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
__ j(above_equal, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
- rcx);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+ rax);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, name, &miss);
}
break;
@@ -732,14 +724,14 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
Label fast;
// Check that the object is a smi or a heap number.
__ JumpIfSmi(rdx, &fast);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
+ __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rax);
__ j(not_equal, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::NUMBER_FUNCTION_INDEX,
- rcx);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+ rax);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, name, &miss);
}
break;
@@ -760,8 +752,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::BOOLEAN_FUNCTION_INDEX,
- rcx);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+ rax);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, name, &miss);
}
break;
@@ -769,7 +761,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rcx, name, &miss);
+ rbx, rax, name, &miss);
// Make sure object->HasFastElements().
// Get the elements array of the object.
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
@@ -783,16 +775,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
UNREACHABLE();
}
- // Get the function and setup the context.
- __ Move(rdi, Handle<JSFunction>(function));
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- ASSERT(function->is_compiled());
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- __ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
// Handle call cache miss.
__ bind(&miss);
@@ -808,19 +791,19 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
}
-Object* CallStubCompiler::CompileCallField(Object* object,
+Object* CallStubCompiler::CompileCallField(JSObject* object,
JSObject* holder,
int index,
String* name) {
// ----------- S t a t e -------------
- // -----------------------------------
- // rsp[0] return address
- // rsp[8] argument argc
- // rsp[16] argument argc - 1
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] argument 1
- // rsp[(argc + 1) * 8] argument 0 = receiver
- // rsp[(argc + 2) * 8] function name
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // -----------------------------------
Label miss;
// Get the receiver from the stack.
@@ -831,9 +814,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
__ JumpIfSmi(rdx, &miss);
// Do the right check and compute the holder register.
- Register reg =
- CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rcx, name, &miss);
+ Register reg = CheckPrototypes(object, rdx, holder, rbx, rax, name, &miss);
GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
@@ -862,10 +843,17 @@ Object* CallStubCompiler::CompileCallField(Object* object,
}
-Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
JSObject* holder,
String* name) {
// ----------- S t a t e -------------
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
+ // ...
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
Label miss;
@@ -878,17 +866,17 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
// Get the receiver from the stack.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- CallInterceptorCompiler compiler(arguments());
+ CallInterceptorCompiler compiler(arguments(), rcx);
CompileLoadInterceptor(&compiler,
this,
masm(),
- JSObject::cast(object),
+ object,
holder,
name,
&lookup,
rdx,
rbx,
- rcx,
+ rdi,
&miss);
// Restore receiver.
@@ -920,7 +908,6 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
}
-
Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
@@ -928,13 +915,13 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
String* name) {
// ----------- S t a t e -------------
- // -----------------------------------
- // rsp[0] return address
- // rsp[8] argument argc
- // rsp[16] argument argc - 1
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
// ...
- // rsp[argc * 8] argument 1
- // rsp[(argc + 1) * 8] argument 0 = receiver
- // rsp[(argc + 2) * 8] function name
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // -----------------------------------
Label miss;
// Get the number of arguments.
@@ -951,7 +938,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
}
// Check that the maps haven't changed.
- CheckPrototypes(object, rdx, holder, rbx, rcx, name, &miss);
+ CheckPrototypes(object, rdx, holder, rbx, rax, name, &miss);
// Get the value from the cell.
__ Move(rdi, Handle<JSGlobalPropertyCell>(cell));
@@ -965,12 +952,12 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
__ JumpIfSmi(rdi, &miss);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
__ j(not_equal, &miss);
// Check the shared function info. Make sure it hasn't changed.
- __ Move(rcx, Handle<SharedFunctionInfo>(function->shared()));
- __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rcx);
+ __ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
+ __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
__ j(not_equal, &miss);
} else {
__ Cmp(rdi, Handle<JSFunction>(function));
@@ -1325,7 +1312,7 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadStringLength(masm(), rcx, rdx, &miss);
+ GenerateLoadStringLength(masm(), rcx, rdx, rbx, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1397,9 +1384,8 @@ Object* StoreStubCompiler::CompileStoreField(JSObject* object,
// -----------------------------------
Label miss;
- // Generate store field code. Trashes the name register.
+ // Generate store field code. Preserves receiver and name on jump to miss.
GenerateStoreField(masm(),
- Builtins::StoreIC_ExtendStorage,
object,
index,
transition,
@@ -1408,7 +1394,6 @@ Object* StoreStubCompiler::CompileStoreField(JSObject* object,
// Handle store cache miss.
__ bind(&miss);
- __ Move(rcx, Handle<String>(name)); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1550,16 +1535,15 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
__ Cmp(rcx, Handle<String>(name));
__ j(not_equal, &miss);
- // Get the object from the stack.
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ // Get the receiver from the stack.
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
- // Generate store field code. Trashes the name register.
+ // Generate store field code. Preserves receiver and name on jump to miss.
GenerateStoreField(masm(),
- Builtins::KeyedStoreIC_ExtendStorage,
object,
index,
transition,
- rbx, rcx, rdx,
+ rdx, rcx, rbx,
&miss);
// Handle store cache miss.
@@ -1665,7 +1649,11 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
Register holder_reg,
Register scratch,
String* name,
+ int save_at_depth,
Label* miss) {
+ // TODO(602): support object saving.
+ ASSERT(save_at_depth == kInvalidProtoDepth);
+
// Check that the maps haven't changed.
Register result =
__ CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
diff --git a/deps/v8/src/x64/virtual-frame-x64.cc b/deps/v8/src/x64/virtual-frame-x64.cc
index cb93d5d46..a0e883c8d 100644
--- a/deps/v8/src/x64/virtual-frame-x64.cc
+++ b/deps/v8/src/x64/virtual-frame-x64.cc
@@ -45,7 +45,7 @@ VirtualFrame::VirtualFrame()
: elements_(parameter_count() + local_count() + kPreallocatedElements),
stack_pointer_(parameter_count() + 1) { // 0-based index of TOS.
for (int i = 0; i <= stack_pointer_; i++) {
- elements_.Add(FrameElement::MemoryElement());
+ elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
}
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
register_locations_[i] = kIllegalIndex;
@@ -193,25 +193,25 @@ void VirtualFrame::EmitPop(const Operand& operand) {
}
-void VirtualFrame::EmitPush(Register reg) {
+void VirtualFrame::EmitPush(Register reg, NumberInfo::Type info) {
ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement());
+ elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
__ push(reg);
}
-void VirtualFrame::EmitPush(const Operand& operand) {
+void VirtualFrame::EmitPush(const Operand& operand, NumberInfo::Type info) {
ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement());
+ elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
__ push(operand);
}
-void VirtualFrame::EmitPush(Immediate immediate) {
+void VirtualFrame::EmitPush(Immediate immediate, NumberInfo::Type info) {
ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement());
+ elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
__ push(immediate);
}
@@ -219,7 +219,7 @@ void VirtualFrame::EmitPush(Immediate immediate) {
void VirtualFrame::EmitPush(Smi* smi_value) {
ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement());
+ elements_.Add(FrameElement::MemoryElement(NumberInfo::kSmi));
stack_pointer_++;
__ Push(smi_value);
}
@@ -227,15 +227,21 @@ void VirtualFrame::EmitPush(Smi* smi_value) {
void VirtualFrame::EmitPush(Handle<Object> value) {
ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement());
+ NumberInfo::Type info = NumberInfo::kUnknown;
+ if (value->IsSmi()) {
+ info = NumberInfo::kSmi;
+ } else if (value->IsHeapNumber()) {
+ info = NumberInfo::kHeapNumber;
+ }
+ elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
__ Push(value);
}
-void VirtualFrame::EmitPush(Heap::RootListIndex index) {
+void VirtualFrame::EmitPush(Heap::RootListIndex index, NumberInfo::Type info) {
ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement());
+ elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
__ PushRoot(index);
}
@@ -305,10 +311,14 @@ int VirtualFrame::InvalidateFrameSlotAt(int index) {
// Set the new backing element.
if (elements_[new_backing_index].is_synced()) {
elements_[new_backing_index] =
- FrameElement::RegisterElement(backing_reg, FrameElement::SYNCED);
+ FrameElement::RegisterElement(backing_reg,
+ FrameElement::SYNCED,
+ original.number_info());
} else {
elements_[new_backing_index] =
- FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED);
+ FrameElement::RegisterElement(backing_reg,
+ FrameElement::NOT_SYNCED,
+ original.number_info());
}
// Update the other copies.
for (int i = new_backing_index + 1; i < element_count(); i++) {
@@ -339,7 +349,8 @@ void VirtualFrame::TakeFrameSlotAt(int index) {
ASSERT(fresh.is_valid());
FrameElement new_element =
FrameElement::RegisterElement(fresh.reg(),
- FrameElement::NOT_SYNCED);
+ FrameElement::NOT_SYNCED,
+ original.number_info());
Use(fresh.reg(), element_count());
elements_.Add(new_element);
__ movq(fresh.reg(), Operand(rbp, fp_relative(index)));
@@ -480,10 +491,12 @@ void VirtualFrame::MakeMergable() {
for (int i = 0; i < element_count(); i++) {
FrameElement element = elements_[i];
+ // In all cases we have to reset the number type information
+ // to unknown for a mergable frame because of incoming back edges.
if (element.is_constant() || element.is_copy()) {
if (element.is_synced()) {
// Just spill.
- elements_[i] = FrameElement::MemoryElement();
+ elements_[i] = FrameElement::MemoryElement(NumberInfo::kUnknown);
} else {
// Allocate to a register.
FrameElement backing_element; // Invalid if not a copy.
@@ -494,7 +507,8 @@ void VirtualFrame::MakeMergable() {
ASSERT(fresh.is_valid()); // A register was spilled if all were in use.
elements_[i] =
FrameElement::RegisterElement(fresh.reg(),
- FrameElement::NOT_SYNCED);
+ FrameElement::NOT_SYNCED,
+ NumberInfo::kUnknown);
Use(fresh.reg(), i);
// Emit a move.
@@ -523,6 +537,7 @@ void VirtualFrame::MakeMergable() {
// The copy flag is not relied on before the end of this loop,
// including when registers are spilled.
elements_[i].clear_copied();
+ elements_[i].set_number_info(NumberInfo::kUnknown);
}
}
}
@@ -728,6 +743,14 @@ Result VirtualFrame::Pop() {
int index = element_count();
ASSERT(element.is_valid());
+ // Get number type information of the result.
+ NumberInfo::Type info;
+ if (!element.is_copy()) {
+ info = element.number_info();
+ } else {
+ info = elements_[element.index()].number_info();
+ }
+
bool pop_needed = (stack_pointer_ == index);
if (pop_needed) {
stack_pointer_--;
@@ -735,6 +758,7 @@ Result VirtualFrame::Pop() {
Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
__ pop(temp.reg());
+ temp.set_number_info(info);
return temp;
}
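
The copy-chasing at the top of Pop deserves a gloss: a copy element stores
only the index of its backing element, so the type information has to be
read through that indirection. Equivalently:

    NumberInfo::Type info = element.is_copy()
        ? elements_[element.index()].number_info()
        : element.number_info();

The info is then attached to whichever Result the rest of the function
produces (popped register, synced memory slot, or existing register);
constants are the exception and carry their own handle.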
@@ -762,14 +786,16 @@ Result VirtualFrame::Pop() {
ASSERT(temp.is_valid());
Use(temp.reg(), index);
FrameElement new_element =
- FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED);
+ FrameElement::RegisterElement(temp.reg(),
+ FrameElement::SYNCED,
+ element.number_info());
// Preserve the copy flag on the element.
if (element.is_copied()) new_element.set_copied();
elements_[index] = new_element;
__ movq(temp.reg(), Operand(rbp, fp_relative(index)));
- return Result(temp.reg());
+ return Result(temp.reg(), info);
} else if (element.is_register()) {
- return Result(element.reg());
+ return Result(element.reg(), info);
} else {
ASSERT(element.is_constant());
return Result(element.handle());
@@ -969,6 +995,17 @@ Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
}
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void VirtualFrame::DebugBreak() {
+ PrepareForCall(0, 0);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ DebugBreak();
+ Result result = cgen()->allocator()->Allocate(rax);
+ ASSERT(result.is_valid());
+}
+#endif
+
+
Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
// Name and receiver are on the top of the frame. The IC expects
// name in rcx and receiver on the stack. It does not drop the
@@ -996,7 +1033,6 @@ Result VirtualFrame::CallKeyedStoreIC() {
// expects value in rax and key and receiver on the stack. It does
// not drop the key and receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- // TODO(1222589): Make the IC grab the values from the stack.
Result value = Pop();
PrepareForCall(2, 0); // Two stack args, neither callee-dropped.
value.ToRegister(rax);
@@ -1008,14 +1044,17 @@ Result VirtualFrame::CallKeyedStoreIC() {
Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
int arg_count,
int loop_nesting) {
- // Arguments, receiver, and function name are on top of the frame.
- // The IC expects them on the stack. It does not drop the function
- // name slot (but it does drop the rest).
+ // Function name, arguments, and receiver are found on top of the frame.
+ // The IC expects the name in rcx and the rest on the stack; the call
+ // drops the arguments and the receiver.
InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = cgen()->ComputeCallInitialize(arg_count, in_loop);
+ Result name = Pop();
// Spill args, receiver, and function. The call will drop args and
// receiver.
- PrepareForCall(arg_count + 2, arg_count + 1);
+ PrepareForCall(arg_count + 1, arg_count + 1);
+ name.ToRegister(rcx);
+ name.Unuse();
return RawCallCodeObject(ic, mode);
}
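
The frame-level convention now mirrors the IC change in ic-x64.cc: the name
is popped into a Result, pinned to rcx, and only the arg_count + 1 remaining
slots (arguments plus receiver) are spilled and dropped. The essential
sequence:

    Result name = Pop();                           // take name off the frame
    PrepareForCall(arg_count + 1, arg_count + 1);  // spill; call drops these
    name.ToRegister(rcx);                          // IC expects name in rcx
    name.Unuse();                                  // release the reference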
diff --git a/deps/v8/src/x64/virtual-frame-x64.h b/deps/v8/src/x64/virtual-frame-x64.h
index 8e3e40f07..c9aa79918 100644
--- a/deps/v8/src/x64/virtual-frame-x64.h
+++ b/deps/v8/src/x64/virtual-frame-x64.h
@@ -28,6 +28,7 @@
#ifndef V8_X64_VIRTUAL_FRAME_X64_H_
#define V8_X64_VIRTUAL_FRAME_X64_H_
+#include "number-info.h"
#include "register-allocator.h"
#include "scopes.h"
@@ -81,7 +82,8 @@ class VirtualFrame : public ZoneObject {
MacroAssembler* masm() { return cgen()->masm(); }
// Create a duplicate of an existing valid frame element.
- FrameElement CopyElementAt(int index);
+ FrameElement CopyElementAt(int index,
+ NumberInfo::Type info = NumberInfo::kUninitialized);
// The number of elements on the virtual frame.
int element_count() { return elements_.length(); }
@@ -321,6 +323,10 @@ class VirtualFrame : public ZoneObject {
Result CallRuntime(Runtime::Function* f, int arg_count);
Result CallRuntime(Runtime::FunctionId id, int arg_count);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ void DebugBreak();
+#endif
+
// Invoke builtin given the number of arguments it expects on (and
// removes from) the stack.
Result InvokeBuiltin(Builtins::JavaScript id,
@@ -343,9 +349,9 @@ class VirtualFrame : public ZoneObject {
// of the frame. Key and receiver are not dropped.
Result CallKeyedStoreIC();
- // Call call IC. Arguments, receiver, and function name are found
- // on top of the frame. Function name slot is not dropped. The
- // argument count does not include the receiver.
+ // Call call IC. Function name, arguments, and receiver are found on top
+ // of the frame and dropped by the call.
+ // The argument count does not include the receiver.
Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
// Allocate and call JS function as constructor. Arguments,
@@ -376,16 +382,20 @@ class VirtualFrame : public ZoneObject {
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
- void EmitPush(Register reg);
- void EmitPush(const Operand& operand);
- void EmitPush(Heap::RootListIndex index);
- void EmitPush(Immediate immediate);
+ void EmitPush(Register reg,
+ NumberInfo::Type info = NumberInfo::kUnknown);
+ void EmitPush(const Operand& operand,
+ NumberInfo::Type info = NumberInfo::kUnknown);
+ void EmitPush(Heap::RootListIndex index,
+ NumberInfo::Type info = NumberInfo::kUnknown);
+ void EmitPush(Immediate immediate,
+ NumberInfo::Type info = NumberInfo::kUnknown);
void EmitPush(Smi* value);
// Uses kScratchRegister, emits appropriate relocation info.
void EmitPush(Handle<Object> value);
// Push an element on the virtual frame.
- void Push(Register reg);
+ void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
void Push(Handle<Object> value);
void Push(Smi* value) { Push(Handle<Object>(value)); }
@@ -393,7 +403,7 @@ class VirtualFrame : public ZoneObject {
// frame).
void Push(Result* result) {
if (result->is_register()) {
- Push(result->reg());
+ Push(result->reg(), result->number_info());
} else {
ASSERT(result->is_constant());
Push(result->handle());
diff --git a/deps/v8/test/cctest/SConscript b/deps/v8/test/cctest/SConscript
index e6c81d80e..acd567e57 100644
--- a/deps/v8/test/cctest/SConscript
+++ b/deps/v8/test/cctest/SConscript
@@ -63,7 +63,10 @@ SOURCES = {
'test-utils.cc',
'test-version.cc'
],
- 'arch:arm': ['test-assembler-arm.cc', 'test-disasm-arm.cc'],
+ 'arch:arm': [
+ 'test-assembler-arm.cc',
+ 'test-disasm-arm.cc'
+ ],
'arch:ia32': [
'test-assembler-ia32.cc',
'test-disasm-ia32.cc',
@@ -72,6 +75,7 @@ SOURCES = {
'arch:x64': ['test-assembler-x64.cc',
'test-macro-assembler-x64.cc',
'test-log-stack-tracer.cc'],
+ 'arch:mips': ['test-assembler-mips.cc'],
'os:linux': ['test-platform-linux.cc'],
'os:macos': ['test-platform-macos.cc'],
'os:nullos': ['test-platform-nullos.cc'],
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index a143cbdab..ecbafa0ac 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -52,3 +52,23 @@ test-api/OutOfMemoryNested: SKIP
# BUG(355): Test crashes on ARM.
test-log/ProfLazyMode: SKIP
+
+[ $arch == mips ]
+test-accessors: SKIP
+test-alloc: SKIP
+test-api: SKIP
+test-compiler: SKIP
+test-debug: SKIP
+test-decls: SKIP
+test-func-name-inference: SKIP
+test-heap: SKIP
+test-heap-profiler: SKIP
+test-log: SKIP
+test-log-utils: SKIP
+test-mark-compact: SKIP
+test-regexp: SKIP
+test-serialize: SKIP
+test-sockets: SKIP
+test-strings: SKIP
+test-threads: SKIP
+test-thread-termination: SKIP
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index f71b3258a..a60ea574f 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -394,6 +394,9 @@ THREADED_TEST(ScriptMakingExternalString) {
v8::HandleScope scope;
LocalContext env;
Local<String> source = String::New(two_byte_source);
+ // Trigger GCs so that the newly allocated string moves to old gen.
+ i::Heap::CollectGarbage(0, i::NEW_SPACE); // in survivor space now
+ i::Heap::CollectGarbage(0, i::NEW_SPACE); // in old gen now
bool success = source->MakeExternal(new TestResource(two_byte_source));
CHECK(success);
Local<Script> script = Script::Compile(source);
@@ -416,6 +419,9 @@ THREADED_TEST(ScriptMakingExternalAsciiString) {
v8::HandleScope scope;
LocalContext env;
Local<String> source = v8_str(c_source);
+ // Trigger GCs so that the newly allocated string moves to old gen.
+ i::Heap::CollectGarbage(0, i::NEW_SPACE); // in survivor space now
+ i::Heap::CollectGarbage(0, i::NEW_SPACE); // in old gen now
bool success = source->MakeExternal(
new TestAsciiResource(i::StrDup(c_source)));
CHECK(success);
@@ -432,6 +438,80 @@ THREADED_TEST(ScriptMakingExternalAsciiString) {
}
+TEST(MakingExternalStringConditions) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ // Free some space in the new space so that we can check freshness.
+ i::Heap::CollectGarbage(0, i::NEW_SPACE);
+ i::Heap::CollectGarbage(0, i::NEW_SPACE);
+
+ Local<String> small_string = String::New(AsciiToTwoByteString("small"));
+ // We should refuse to externalize a newly created small string.
+ CHECK(!small_string->CanMakeExternal());
+ // Trigger GCs so that the newly allocated string moves to old gen.
+ i::Heap::CollectGarbage(0, i::NEW_SPACE); // in survivor space now
+ i::Heap::CollectGarbage(0, i::NEW_SPACE); // in old gen now
+ // Old space strings should be accepted.
+ CHECK(small_string->CanMakeExternal());
+
+ small_string = String::New(AsciiToTwoByteString("small 2"));
+ // We should refuse to externalize a newly created small string.
+ CHECK(!small_string->CanMakeExternal());
+ for (int i = 0; i < 100; i++) {
+ String::Value value(small_string);
+ }
+ // Frequently used strings should be accepted.
+ CHECK(small_string->CanMakeExternal());
+
+ const int buf_size = 10 * 1024;
+ char* buf = i::NewArray<char>(buf_size);
+ memset(buf, 'a', buf_size);
+ buf[buf_size - 1] = '\0';
+ Local<String> large_string = String::New(AsciiToTwoByteString(buf));
+ i::DeleteArray(buf);
+ // Large strings should be immediately accepted.
+ CHECK(large_string->CanMakeExternal());
+}
+
+
+TEST(MakingExternalAsciiStringConditions) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ // Free some space in the new space so that we can check freshness.
+ i::Heap::CollectGarbage(0, i::NEW_SPACE);
+ i::Heap::CollectGarbage(0, i::NEW_SPACE);
+
+ Local<String> small_string = String::New("small");
+ // We should refuse to externalize a newly created small string.
+ CHECK(!small_string->CanMakeExternal());
+ // Trigger GCs so that the newly allocated string moves to old gen.
+ i::Heap::CollectGarbage(0, i::NEW_SPACE); // in survivor space now
+ i::Heap::CollectGarbage(0, i::NEW_SPACE); // in old gen now
+ // Old space strings should be accepted.
+ CHECK(small_string->CanMakeExternal());
+
+ small_string = String::New("small 2");
+ // We should refuse to externalize a newly created small string.
+ CHECK(!small_string->CanMakeExternal());
+ for (int i = 0; i < 100; i++) {
+ String::Value value(small_string);
+ }
+ // Frequently used strings should be accepted.
+ CHECK(small_string->CanMakeExternal());
+
+ const int buf_size = 10 * 1024;
+ char* buf = i::NewArray<char>(buf_size);
+ memset(buf, 'a', buf_size);
+ buf[buf_size - 1] = '\0';
+ Local<String> large_string = String::New(buf);
+ i::DeleteArray(buf);
+ // Large strings should be immediately accepted.
+ CHECK(large_string->CanMakeExternal());
+}
+
+
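Both tests lean on the same promotion idiom: with a two-generation scavenger,
an object must survive one new-space collection into survivor space and a
second one to reach the old generation, hence the doubled call:

    i::Heap::CollectGarbage(0, i::NEW_SPACE);  // now in survivor space
    i::Heap::CollectGarbage(0, i::NEW_SPACE);  // now in old generation

CanMakeExternal() is expected to flip from false to true across those two
collections for small strings, while large strings qualify immediately.
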
THREADED_TEST(UsingExternalString) {
{
v8::HandleScope scope;
@@ -2297,6 +2377,103 @@ THREADED_TEST(SimplePropertyRead) {
}
}
+THREADED_TEST(DefinePropertyOnAPIAccessor) {
+ v8::HandleScope scope;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"));
+ LocalContext context;
+ context->Global()->Set(v8_str("obj"), templ->NewInstance());
+
+ // Uses getOwnPropertyDescriptor to check the configurable status.
+ Local<Script> script_desc
+ = Script::Compile(v8_str("var prop = Object.getOwnPropertyDescriptor("
+ "obj, 'x');"
+ "prop.configurable;"));
+ Local<Value> result = script_desc->Run();
+ CHECK_EQ(result->BooleanValue(), true);
+
+ // Redefine the getter - the property should remain configurable.
+ Local<Script> script_define
+ = Script::Compile(v8_str("var desc = { get: function(){return 42; },"
+ " configurable: true };"
+ "Object.defineProperty(obj, 'x', desc);"
+ "obj.x"));
+ result = script_define->Run();
+ CHECK_EQ(result, v8_num(42));
+
+ // Check that the accessor is still configurable.
+ result = script_desc->Run();
+ CHECK_EQ(result->BooleanValue(), true);
+
+ // Redefine to a non-configurable accessor.
+ script_define
+ = Script::Compile(v8_str("var desc = { get: function(){return 43; },"
+ " configurable: false };"
+ "Object.defineProperty(obj, 'x', desc);"
+ "obj.x"));
+ result = script_define->Run();
+ CHECK_EQ(result, v8_num(43));
+ result = script_desc->Run();
+ CHECK_EQ(result->BooleanValue(), false);
+
+ // Make sure that it is not possible to redefine the property again.
+ v8::TryCatch try_catch;
+ result = script_define->Run();
+ CHECK(try_catch.HasCaught());
+ String::AsciiValue exception_value(try_catch.Exception());
+ CHECK_EQ(*exception_value,
+ "TypeError: Cannot redefine property: defineProperty");
+}
+
+THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
+ v8::HandleScope scope;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"));
+ LocalContext context;
+ context->Global()->Set(v8_str("obj"), templ->NewInstance());
+
+ Local<Script> script_desc = Script::Compile(v8_str("var prop = "
+ "Object.getOwnPropertyDescriptor(obj, 'x');"
+ "prop.configurable;"));
+ Local<Value> result = script_desc->Run();
+ CHECK_EQ(result->BooleanValue(), true);
+
+ Local<Script> script_define =
+ Script::Compile(v8_str("var desc = {get: function(){return 42; },"
+ " configurable: true };"
+ "Object.defineProperty(obj, 'x', desc);"
+ "obj.x"));
+ result = script_define->Run();
+ CHECK_EQ(result, v8_num(42));
+
+ result = script_desc->Run();
+ CHECK_EQ(result->BooleanValue(), true);
+
+ script_define =
+ Script::Compile(v8_str("var desc = {get: function(){return 43; },"
+ " configurable: false };"
+ "Object.defineProperty(obj, 'x', desc);"
+ "obj.x"));
+ result = script_define->Run();
+ CHECK_EQ(result, v8_num(43));
+ result = script_desc->Run();
+ CHECK_EQ(result->BooleanValue(), false);
+
+ v8::TryCatch try_catch;
+ result = script_define->Run();
+ CHECK(try_catch.HasCaught());
+ String::AsciiValue exception_value(try_catch.Exception());
+ CHECK_EQ(*exception_value,
+ "TypeError: Cannot redefine property: defineProperty");
+}
+
+
v8::Persistent<Value> xValue;
@@ -2351,6 +2528,33 @@ THREADED_TEST(NamedInterceptorPropertyRead) {
}
+static v8::Handle<Value> SetXOnPrototypeGetter(Local<String> property,
+ const AccessorInfo& info) {
+ // Set x on the prototype object and do not handle the get request.
+ v8::Handle<v8::Value> proto = info.Holder()->GetPrototype();
+ v8::Handle<v8::Object>::Cast(proto)->Set(v8_str("x"), v8::Integer::New(23));
+ return v8::Handle<Value>();
+}
+
+
+// This is a regression test for http://crbug.com/20104. Map
+// transitions should not interfere with post-interceptor lookup.
+THREADED_TEST(NamedInterceptorMapTransitionRead) {
+ v8::HandleScope scope;
+ Local<v8::FunctionTemplate> function_template = v8::FunctionTemplate::New();
+ Local<v8::ObjectTemplate> instance_template
+ = function_template->InstanceTemplate();
+ instance_template->SetNamedPropertyHandler(SetXOnPrototypeGetter);
+ LocalContext context;
+ context->Global()->Set(v8_str("F"), function_template->GetFunction());
+ // Create an instance of F and introduce a map transition for x.
+ CompileRun("var o = new F(); o.x = 23;");
+ // Create an instance of F and invoke the getter. The result should be 23.
+ Local<Value> result = CompileRun("o = new F(); o.x");
+ CHECK_EQ(result->Int32Value(), 23);
+}
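+// Mechanics of the regression above: storing o.x on the first instance adds
+// a map transition for 'x'. On the second instance the interceptor declines
+// the get and instead plants x on the prototype, so the lookup performed
+// after the interceptor must walk the prototype chain (finding 23) rather
+// than trust the transitioned map.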
+
+
static v8::Handle<Value> IndexedPropertyGetter(uint32_t index,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
@@ -2432,6 +2636,195 @@ THREADED_TEST(IndexedInterceptorWithNoSetter) {
}
+THREADED_TEST(IndexedInterceptorWithAccessorCheck) {
+ v8::HandleScope scope;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+
+ LocalContext context;
+ Local<v8::Object> obj = templ->NewInstance();
+ obj->TurnOnAccessCheck();
+ context->Global()->Set(v8_str("obj"), obj);
+
+ const char* code =
+ "try {"
+ " for (var i = 0; i < 100; i++) {"
+ " var v = obj[0];"
+ " if (v != undefined) throw 'Wrong value ' + v + ' at iteration ' + i;"
+ " }"
+ " 'PASSED'"
+ "} catch(e) {"
+ " e"
+ "}";
+ ExpectString(code, "PASSED");
+}
+
+
+THREADED_TEST(IndexedInterceptorWithAccessorCheckSwitchedOn) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+
+ LocalContext context;
+ Local<v8::Object> obj = templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), obj);
+
+ const char* code =
+ "try {"
+ " for (var i = 0; i < 100; i++) {"
+ " var expected = i;"
+ " if (i == 5) {"
+ " %EnableAccessChecks(obj);"
+ " expected = undefined;"
+ " }"
+ " var v = obj[i];"
+ " if (v != expected) throw 'Wrong value ' + v + ' at iteration ' + i;"
+ " if (i == 5) %DisableAccessChecks(obj);"
+ " }"
+ " 'PASSED'"
+ "} catch(e) {"
+ " e"
+ "}";
+ ExpectString(code, "PASSED");
+}
+
+
+THREADED_TEST(IndexedInterceptorWithDifferentIndices) {
+ v8::HandleScope scope;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+
+ LocalContext context;
+ Local<v8::Object> obj = templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), obj);
+
+ const char* code =
+ "try {"
+ " for (var i = 0; i < 100; i++) {"
+ " var v = obj[i];"
+ " if (v != i) throw 'Wrong value ' + v + ' at iteration ' + i;"
+ " }"
+ " 'PASSED'"
+ "} catch(e) {"
+ " e"
+ "}";
+ ExpectString(code, "PASSED");
+}
+
+
+THREADED_TEST(IndexedInterceptorWithNotSmiLookup) {
+ v8::HandleScope scope;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+
+ LocalContext context;
+ Local<v8::Object> obj = templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), obj);
+
+ const char* code =
+ "try {"
+ " for (var i = 0; i < 100; i++) {"
+ " var expected = i;"
+ " if (i == 50) {"
+ " i = 'foobar';"
+ " expected = undefined;"
+ " }"
+ " var v = obj[i];"
+ " if (v != expected) throw 'Wrong value ' + v + ' at iteration ' + i;"
+ " }"
+ " 'PASSED'"
+ "} catch(e) {"
+ " e"
+ "}";
+ ExpectString(code, "PASSED");
+}
+
+
+THREADED_TEST(IndexedInterceptorGoingMegamorphic) {
+ v8::HandleScope scope;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+
+ LocalContext context;
+ Local<v8::Object> obj = templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), obj);
+
+ const char* code =
+ "var original = obj;"
+ "try {"
+ " for (var i = 0; i < 100; i++) {"
+ " var expected = i;"
+ " if (i == 50) {"
+ " obj = {50: 'foobar'};"
+ " expected = 'foobar';"
+ " }"
+ " var v = obj[i];"
+ " if (v != expected) throw 'Wrong value ' + v + ' at iteration ' + i;"
+ " if (i == 50) obj = original;"
+ " }"
+ " 'PASSED'"
+ "} catch(e) {"
+ " e"
+ "}";
+ ExpectString(code, "PASSED");
+}
+
+
+THREADED_TEST(IndexedInterceptorReceiverTurningSmi) {
+ v8::HandleScope scope;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+
+ LocalContext context;
+ Local<v8::Object> obj = templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), obj);
+
+ const char* code =
+ "var original = obj;"
+ "try {"
+ " for (var i = 0; i < 100; i++) {"
+ " var expected = i;"
+ " if (i == 5) {"
+ " obj = 239;"
+ " expected = undefined;"
+ " }"
+ " var v = obj[i];"
+ " if (v != expected) throw 'Wrong value ' + v + ' at iteration ' + i;"
+ " if (i == 5) obj = original;"
+ " }"
+ " 'PASSED'"
+ "} catch(e) {"
+ " e"
+ "}";
+ ExpectString(code, "PASSED");
+}
+
+
+THREADED_TEST(IndexedInterceptorOnProto) {
+ v8::HandleScope scope;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+
+ LocalContext context;
+ Local<v8::Object> obj = templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), obj);
+
+ const char* code =
+ "var o = {__proto__: obj};"
+ "try {"
+ " for (var i = 0; i < 100; i++) {"
+ " var v = o[i];"
+ " if (v != i) throw 'Wrong value ' + v + ' at iteration ' + i;"
+ " }"
+ " 'PASSED'"
+ "} catch(e) {"
+ " e"
+ "}";
+ ExpectString(code, "PASSED");
+}
+
+
THREADED_TEST(MultiContexts) {
v8::HandleScope scope;
v8::Handle<ObjectTemplate> templ = ObjectTemplate::New();
@@ -4746,6 +5139,84 @@ THREADED_TEST(HiddenPrototype) {
}
+THREADED_TEST(SetPrototype) {
+ v8::HandleScope handle_scope;
+ LocalContext context;
+
+ Local<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New();
+ t0->InstanceTemplate()->Set(v8_str("x"), v8_num(0));
+ Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New();
+ t1->SetHiddenPrototype(true);
+ t1->InstanceTemplate()->Set(v8_str("y"), v8_num(1));
+ Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New();
+ t2->SetHiddenPrototype(true);
+ t2->InstanceTemplate()->Set(v8_str("z"), v8_num(2));
+ Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New();
+ t3->InstanceTemplate()->Set(v8_str("u"), v8_num(3));
+
+ Local<v8::Object> o0 = t0->GetFunction()->NewInstance();
+ Local<v8::Object> o1 = t1->GetFunction()->NewInstance();
+ Local<v8::Object> o2 = t2->GetFunction()->NewInstance();
+ Local<v8::Object> o3 = t3->GetFunction()->NewInstance();
+
+ // Setting the prototype on an object does not skip hidden prototypes.
+ CHECK_EQ(0, o0->Get(v8_str("x"))->Int32Value());
+ CHECK(o0->SetPrototype(o1));
+ CHECK_EQ(0, o0->Get(v8_str("x"))->Int32Value());
+ CHECK_EQ(1, o0->Get(v8_str("y"))->Int32Value());
+ CHECK(o1->SetPrototype(o2));
+ CHECK_EQ(0, o0->Get(v8_str("x"))->Int32Value());
+ CHECK_EQ(1, o0->Get(v8_str("y"))->Int32Value());
+ CHECK_EQ(2, o0->Get(v8_str("z"))->Int32Value());
+ CHECK(o2->SetPrototype(o3));
+ CHECK_EQ(0, o0->Get(v8_str("x"))->Int32Value());
+ CHECK_EQ(1, o0->Get(v8_str("y"))->Int32Value());
+ CHECK_EQ(2, o0->Get(v8_str("z"))->Int32Value());
+ CHECK_EQ(3, o0->Get(v8_str("u"))->Int32Value());
+
+ // Getting the prototype of o0 should get the first visible one
+ // which is o3. Therefore, z should not be defined on the prototype
+ // object.
+ Local<Value> proto = o0->Get(v8_str("__proto__"));
+ CHECK(proto->IsObject());
+ CHECK_EQ(v8::Handle<v8::Object>::Cast(proto), o3);
+
+ // However, Object::GetPrototype ignores hidden prototypes.
+ Local<Value> proto0 = o0->GetPrototype();
+ CHECK(proto0->IsObject());
+ CHECK_EQ(v8::Handle<v8::Object>::Cast(proto0), o1);
+
+ Local<Value> proto1 = o1->GetPrototype();
+ CHECK(proto1->IsObject());
+ CHECK_EQ(v8::Handle<v8::Object>::Cast(proto1), o2);
+
+ Local<Value> proto2 = o2->GetPrototype();
+ CHECK(proto2->IsObject());
+ CHECK_EQ(v8::Handle<v8::Object>::Cast(proto2), o3);
+}
+
+
+THREADED_TEST(SetPrototypeThrows) {
+ v8::HandleScope handle_scope;
+ LocalContext context;
+
+ Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+
+ Local<v8::Object> o0 = t->GetFunction()->NewInstance();
+ Local<v8::Object> o1 = t->GetFunction()->NewInstance();
+
+ CHECK(o0->SetPrototype(o1));
+ // If setting the prototype would create a cycle, SetPrototype should
+ // return false and leave the VM in a sane state.
+ v8::TryCatch try_catch;
+ CHECK(!o1->SetPrototype(o0));
+ CHECK(!try_catch.HasCaught());
+ ASSERT(!i::Top::has_pending_exception());
+
+ CHECK_EQ(42, CompileRun("function f() { return 42; }; f()")->Int32Value());
+}
+
+
THREADED_TEST(GetterSetterExceptions) {
v8::HandleScope handle_scope;
LocalContext context;
@@ -5793,6 +6264,294 @@ THREADED_TEST(InterceptorCallICCachedFromGlobal) {
CHECK_EQ(239 * 10, value->Int32Value());
}
+static v8::Handle<Value> InterceptorCallICFastApi(Local<String> name,
+ const AccessorInfo& info) {
+ ApiTestFuzzer::Fuzz();
+ int* call_count = reinterpret_cast<int*>(v8::External::Unwrap(info.Data()));
+ ++(*call_count);
+ if ((*call_count) % 20 == 0) {
+ v8::internal::Heap::CollectAllGarbage(true);
+ }
+ return v8::Handle<Value>();
+}
+
+static v8::Handle<Value> FastApiCallback_TrivialSignature(
+ const v8::Arguments& args) {
+ ApiTestFuzzer::Fuzz();
+ CHECK_EQ(args.This(), args.Holder());
+ CHECK(args.Data()->Equals(v8_str("method_data")));
+ return v8::Integer::New(args[0]->Int32Value() + 1);
+}
+
+static v8::Handle<Value> FastApiCallback_SimpleSignature(
+ const v8::Arguments& args) {
+ ApiTestFuzzer::Fuzz();
+ CHECK_EQ(args.This()->GetPrototype(), args.Holder());
+ CHECK(args.Data()->Equals(v8_str("method_data")));
+ // Note: we're using HasRealNamedProperty instead of Has to avoid
+ // invoking the interceptor again.
+ CHECK(args.Holder()->HasRealNamedProperty(v8_str("foo")));
+ return v8::Integer::New(args[0]->Int32Value() + 1);
+}
+
+// Helper to maximize the odds of objects moving during GC.
+static void GenerateSomeGarbage() {
+ CompileRun(
+ "var garbage;"
+ "for (var i = 0; i < 1000; i++) {"
+ " garbage = [1/i, \"garbage\" + i, garbage, {foo: garbage}];"
+ "}"
+ "garbage = undefined;");
+}
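+// Churning through new space like this forces scavenges, so objects
+// allocated before the call (the function and its instances below) are
+// likely to have been relocated by the time the ICs run.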
+
+THREADED_TEST(InterceptorCallICFastApi_TrivialSignature) {
+ int interceptor_call_count = 0;
+ v8::HandleScope scope;
+ v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> method_templ =
+ v8::FunctionTemplate::New(FastApiCallback_TrivialSignature,
+ v8_str("method_data"),
+ v8::Handle<v8::Signature>());
+ v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ proto_templ->Set(v8_str("method"), method_templ);
+ v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+ templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
+ NULL, NULL, NULL, NULL,
+ v8::External::Wrap(&interceptor_call_count));
+ LocalContext context;
+ v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ GenerateSomeGarbage();
+ context->Global()->Set(v8_str("o"), fun->NewInstance());
+ v8::Handle<Value> value = CompileRun(
+ "var result = 0;"
+ "for (var i = 0; i < 100; i++) {"
+ " result = o.method(41);"
+ "}");
+ CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(100, interceptor_call_count);
+}
+
+THREADED_TEST(InterceptorCallICFastApi_SimpleSignature) {
+ int interceptor_call_count = 0;
+ v8::HandleScope scope;
+ v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> method_templ =
+ v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
+ v8_str("method_data"),
+ v8::Signature::New(fun_templ));
+ v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ proto_templ->Set(v8_str("method"), method_templ);
+ v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+ templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
+ NULL, NULL, NULL, NULL,
+ v8::External::Wrap(&interceptor_call_count));
+ LocalContext context;
+ v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ GenerateSomeGarbage();
+ context->Global()->Set(v8_str("o"), fun->NewInstance());
+ v8::Handle<Value> value = CompileRun(
+ "o.foo = 17;"
+ "var receiver = {};"
+ "receiver.__proto__ = o;"
+ "var result = 0;"
+ "for (var i = 0; i < 100; i++) {"
+ " result = receiver.method(41);"
+ "}");
+ CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(100, interceptor_call_count);
+}
+
+THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss1) {
+ int interceptor_call_count = 0;
+ v8::HandleScope scope;
+ v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> method_templ =
+ v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
+ v8_str("method_data"),
+ v8::Signature::New(fun_templ));
+ v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ proto_templ->Set(v8_str("method"), method_templ);
+ v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+ templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
+ NULL, NULL, NULL, NULL,
+ v8::External::Wrap(&interceptor_call_count));
+ LocalContext context;
+ v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ GenerateSomeGarbage();
+ context->Global()->Set(v8_str("o"), fun->NewInstance());
+ v8::Handle<Value> value = CompileRun(
+ "o.foo = 17;"
+ "var receiver = {};"
+ "receiver.__proto__ = o;"
+ "var result = 0;"
+ "var saved_result = 0;"
+ "for (var i = 0; i < 100; i++) {"
+ " result = receiver.method(41);"
+ " if (i == 50) {"
+ " saved_result = result;"
+ " receiver = {method: function(x) { return x - 1 }};"
+ " }"
+ "}");
+ CHECK_EQ(40, context->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
+ CHECK_GE(interceptor_call_count, 50);
+}
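+// The CHECK_GE above is deliberately loose: iterations 0..50 resolve
+// 'method' through o's named interceptor, so it runs at least ~50 times;
+// after the receiver swap the replacement object carries its own method
+// and the interceptor need not be consulted again.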
+
+THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss2) {
+ int interceptor_call_count = 0;
+ v8::HandleScope scope;
+ v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> method_templ =
+ v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
+ v8_str("method_data"),
+ v8::Signature::New(fun_templ));
+ v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ proto_templ->Set(v8_str("method"), method_templ);
+ v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+ templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
+ NULL, NULL, NULL, NULL,
+ v8::External::Wrap(&interceptor_call_count));
+ LocalContext context;
+ v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ GenerateSomeGarbage();
+ context->Global()->Set(v8_str("o"), fun->NewInstance());
+ v8::Handle<Value> value = CompileRun(
+ "o.foo = 17;"
+ "var receiver = {};"
+ "receiver.__proto__ = o;"
+ "var result = 0;"
+ "var saved_result = 0;"
+ "for (var i = 0; i < 100; i++) {"
+ " result = receiver.method(41);"
+ " if (i == 50) {"
+ " saved_result = result;"
+ " o.method = function(x) { return x - 1 };"
+ " }"
+ "}");
+ CHECK_EQ(40, context->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
+ CHECK_GE(interceptor_call_count, 50);
+}
+
+THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
+ int interceptor_call_count = 0;
+ v8::HandleScope scope;
+ v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> method_templ =
+ v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
+ v8_str("method_data"),
+ v8::Signature::New(fun_templ));
+ v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ proto_templ->Set(v8_str("method"), method_templ);
+ v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+ templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
+ NULL, NULL, NULL, NULL,
+ v8::External::Wrap(&interceptor_call_count));
+ LocalContext context;
+ v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ GenerateSomeGarbage();
+ context->Global()->Set(v8_str("o"), fun->NewInstance());
+ v8::TryCatch try_catch;
+ v8::Handle<Value> value = CompileRun(
+ "o.foo = 17;"
+ "var receiver = {};"
+ "receiver.__proto__ = o;"
+ "var result = 0;"
+ "var saved_result = 0;"
+ "for (var i = 0; i < 100; i++) {"
+ " result = receiver.method(41);"
+ " if (i == 50) {"
+ " saved_result = result;"
+ " receiver = {method: receiver.method};"
+ " }"
+ "}");
+ CHECK(try_catch.HasCaught());
+ CHECK_EQ(v8_str("TypeError: Illegal invocation"),
+ try_catch.Exception()->ToString());
+ CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
+ CHECK_GE(interceptor_call_count, 50);
+}
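+// Why this throws, as asserted above: {method: receiver.method} detaches
+// the API function from any receiver matching the v8::Signature built
+// around fun_templ, so the signature check fails with "Illegal invocation"
+// instead of invoking the callback on an incompatible holder.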
+
+THREADED_TEST(CallICFastApi_TrivialSignature) {
+ v8::HandleScope scope;
+ v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> method_templ =
+ v8::FunctionTemplate::New(FastApiCallback_TrivialSignature,
+ v8_str("method_data"),
+ v8::Handle<v8::Signature>());
+ v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ proto_templ->Set(v8_str("method"), method_templ);
+ v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+ LocalContext context;
+ v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ GenerateSomeGarbage();
+ context->Global()->Set(v8_str("o"), fun->NewInstance());
+ v8::Handle<Value> value = CompileRun(
+ "var result = 0;"
+ "for (var i = 0; i < 100; i++) {"
+ " result = o.method(41);"
+ "}");
+
+ CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
+}
+
+THREADED_TEST(CallICFastApi_SimpleSignature) {
+ v8::HandleScope scope;
+ v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> method_templ =
+ v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
+ v8_str("method_data"),
+ v8::Signature::New(fun_templ));
+ v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ proto_templ->Set(v8_str("method"), method_templ);
+ v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+ LocalContext context;
+ v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ GenerateSomeGarbage();
+ context->Global()->Set(v8_str("o"), fun->NewInstance());
+ v8::Handle<Value> value = CompileRun(
+ "o.foo = 17;"
+ "var receiver = {};"
+ "receiver.__proto__ = o;"
+ "var result = 0;"
+ "for (var i = 0; i < 100; i++) {"
+ " result = receiver.method(41);"
+ "}");
+
+ CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
+}
+
+THREADED_TEST(CallICFastApi_SimpleSignature_Miss) {
+ v8::HandleScope scope;
+ v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> method_templ =
+ v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
+ v8_str("method_data"),
+ v8::Signature::New(fun_templ));
+ v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ proto_templ->Set(v8_str("method"), method_templ);
+ v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+ LocalContext context;
+ v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ GenerateSomeGarbage();
+ context->Global()->Set(v8_str("o"), fun->NewInstance());
+ v8::Handle<Value> value = CompileRun(
+ "o.foo = 17;"
+ "var receiver = {};"
+ "receiver.__proto__ = o;"
+ "var result = 0;"
+ "var saved_result = 0;"
+ "for (var i = 0; i < 100; i++) {"
+ " result = receiver.method(41);"
+ " if (i == 50) {"
+ " saved_result = result;"
+ " receiver = {method: function(x) { return x - 1 }};"
+ " }"
+ "}");
+ CHECK_EQ(40, context->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
+}
+
static int interceptor_call_count = 0;
@@ -8829,3 +9588,138 @@ TEST(Regress528) {
other_context.Dispose();
}
+
+
+THREADED_TEST(ScriptOrigin) {
+ v8::HandleScope scope;
+ LocalContext env;
+ v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"));
+ v8::Handle<v8::String> script = v8::String::New(
+ "function f() {}\n\nfunction g() {}");
+ v8::Script::Compile(script, &origin)->Run();
+ v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("f")));
+ v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("g")));
+
+ v8::ScriptOrigin script_origin_f = f->GetScriptOrigin();
+ CHECK_EQ("test", *v8::String::AsciiValue(script_origin_f.ResourceName()));
+ CHECK_EQ(0, script_origin_f.ResourceLineOffset()->Int32Value());
+
+ v8::ScriptOrigin script_origin_g = g->GetScriptOrigin();
+ CHECK_EQ("test", *v8::String::AsciiValue(script_origin_g.ResourceName()));
+ CHECK_EQ(0, script_origin_g.ResourceLineOffset()->Int32Value());
+}
+
+
+THREADED_TEST(ScriptLineNumber) {
+ v8::HandleScope scope;
+ LocalContext env;
+ v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"));
+ v8::Handle<v8::String> script = v8::String::New(
+ "function f() {}\n\nfunction g() {}");
+ v8::Script::Compile(script, &origin)->Run();
+ v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("f")));
+ v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("g")));
+ CHECK_EQ(0, f->GetScriptLineNumber());
+ CHECK_EQ(2, g->GetScriptLineNumber());
+}
+
+
+static v8::Handle<Value> GetterWhichReturns42(Local<String> name,
+ const AccessorInfo& info) {
+ return v8_num(42);
+}
+
+
+static void SetterWhichSetsYOnThisTo23(Local<String> name,
+ Local<Value> value,
+ const AccessorInfo& info) {
+ info.This()->Set(v8_str("y"), v8_num(23));
+}
+
+
+THREADED_TEST(SetterOnConstructorPrototype) {
+ v8::HandleScope scope;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetAccessor(v8_str("x"),
+ GetterWhichReturns42,
+ SetterWhichSetsYOnThisTo23);
+ LocalContext context;
+ context->Global()->Set(v8_str("P"), templ->NewInstance());
+ CompileRun("function C1() {"
+ " this.x = 23;"
+ "};"
+ "C1.prototype = P;"
+ "function C2() {"
+ " this.x = 23"
+ "};"
+ "C2.prototype = { };"
+ "C2.prototype.__proto__ = P;");
+
+ v8::Local<v8::Script> script;
+ script = v8::Script::Compile(v8_str("new C1();"));
+ for (int i = 0; i < 10; i++) {
+ v8::Handle<v8::Object> c1 = v8::Handle<v8::Object>::Cast(script->Run());
+ CHECK_EQ(42, c1->Get(v8_str("x"))->Int32Value());
+ CHECK_EQ(23, c1->Get(v8_str("y"))->Int32Value());
+ }
+
+ script = v8::Script::Compile(v8_str("new C2();"));
+ for (int i = 0; i < 10; i++) {
+ v8::Handle<v8::Object> c2 = v8::Handle<v8::Object>::Cast(script->Run());
+ CHECK_EQ(42, c2->Get(v8_str("x"))->Int32Value());
+ CHECK_EQ(23, c2->Get(v8_str("y"))->Int32Value());
+ }
+}
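+// Why x reads 42 and y reads 23: 'this.x = 23' in the constructors hits the
+// accessor inherited from P, whose setter stores y on the instance and never
+// stores x, so reads of x keep reaching the prototype getter's 42.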
+
+
+static v8::Handle<Value> NamedPropertyGetterWhichReturns42(
+ Local<String> name, const AccessorInfo& info) {
+ return v8_num(42);
+}
+
+
+static v8::Handle<Value> NamedPropertySetterWhichSetsYOnThisTo23(
+ Local<String> name, Local<Value> value, const AccessorInfo& info) {
+ if (name->Equals(v8_str("x"))) {
+ info.This()->Set(v8_str("y"), v8_num(23));
+ }
+ return v8::Handle<Value>();
+}
+
+
+THREADED_TEST(InterceptorOnConstructorPrototype) {
+ v8::HandleScope scope;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetNamedPropertyHandler(NamedPropertyGetterWhichReturns42,
+ NamedPropertySetterWhichSetsYOnThisTo23);
+ LocalContext context;
+ context->Global()->Set(v8_str("P"), templ->NewInstance());
+ CompileRun("function C1() {"
+ " this.x = 23;"
+ "};"
+ "C1.prototype = P;"
+ "function C2() {"
+ " this.x = 23"
+ "};"
+ "C2.prototype = { };"
+ "C2.prototype.__proto__ = P;");
+
+ v8::Local<v8::Script> script;
+ script = v8::Script::Compile(v8_str("new C1();"));
+ for (int i = 0; i < 10; i++) {
+ v8::Handle<v8::Object> c1 = v8::Handle<v8::Object>::Cast(script->Run());
+ CHECK_EQ(23, c1->Get(v8_str("x"))->Int32Value());
+ CHECK_EQ(42, c1->Get(v8_str("y"))->Int32Value());
+ }
+
+ script = v8::Script::Compile(v8_str("new C2();"));
+ for (int i = 0; i < 10; i++) {
+ v8::Handle<v8::Object> c2 = v8::Handle<v8::Object>::Cast(script->Run());
+ CHECK_EQ(23, c2->Get(v8_str("x"))->Int32Value());
+ CHECK_EQ(42, c2->Get(v8_str("y"))->Int32Value());
+ }
+}
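+
+// Contrast with the accessor variant above: a named interceptor only acts on
+// the object it is installed on, so 'this.x = 23' never reaches P and
+// creates a real instance property (x reads 23), while the read of the
+// absent y falls through to P's interceptor, which answers 42.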
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
new file mode 100644
index 000000000..ab011a73e
--- /dev/null
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -0,0 +1,257 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "disassembler.h"
+#include "factory.h"
+#include "macro-assembler.h"
+#include "mips/macro-assembler-mips.h"
+#include "mips/simulator-mips.h"
+
+#include "cctest.h"
+
+using namespace v8::internal;
+
+
+// Define these function prototypes to match JSEntryFunction in execution.cc.
+typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
+typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
+typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
+
+
+static v8::Persistent<v8::Context> env;
+
+
+// The test framework does not accept flags on the command line, so we set them.
+static void InitializeVM() {
+ // Disable compilation of natives by specifying an empty natives file.
+ FLAG_natives_file = "";
+
+ // Enable generation of comments.
+ FLAG_debug_code = true;
+
+ if (env.IsEmpty()) {
+ env = v8::Context::New();
+ }
+}
+
+
+#define __ assm.
+
+TEST(MIPS0) {
+ InitializeVM();
+ v8::HandleScope scope;
+
+ MacroAssembler assm(NULL, 0);
+
+ // Addition.
+ __ addu(v0, a0, a1);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = Heap::CreateCode(desc,
+ NULL,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Object>(Heap::undefined_value()));
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F2 f = FUNCTION_CAST<F2>(Code::cast(code)->entry());
+ int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
+ ::printf("f() = %d\n", res);
+ CHECK_EQ(0xabc, res);
+}
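+// Arithmetic behind the check: addu(v0, a0, a1) with the arguments 0xab0
+// and 0xc returns 0xab0 + 0xc = 0xabc.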
+
+
+TEST(MIPS1) {
+ InitializeVM();
+ v8::HandleScope scope;
+
+ MacroAssembler assm(NULL, 0);
+ Label L, C;
+
+ __ mov(a1, a0);
+ __ li(v0, 0);
+ __ b(&C);
+ __ nop();
+
+ __ bind(&L);
+ __ add(v0, v0, a1);
+ __ addiu(a1, a1, -1);
+
+ __ bind(&C);
+ __ xori(v1, a1, 0);
+ __ Branch(ne, &L, v1, Operand(0));
+ __ nop();
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = Heap::CreateCode(desc,
+ NULL,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Object>(Heap::undefined_value()));
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
+ int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 50, 0, 0, 0, 0));
+ ::printf("f() = %d\n", res);
+ CHECK_EQ(1275, res);
+}
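+// The generated loop accumulates a1 + (a1 - 1) + ... + 1 into v0, so for
+// the argument 50 the expected result is 50 * 51 / 2 = 1275.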
+
+
+TEST(MIPS2) {
+ InitializeVM();
+ v8::HandleScope scope;
+
+ MacroAssembler assm(NULL, 0);
+
+ Label exit, error;
+
+ // ----- Test all instructions.
+
+ // Test lui, ori, and addiu, used in the li pseudo-instruction.
+ // This way we can then safely load registers with chosen values.
+
+ __ ori(t0, zero_reg, 0);
+ __ lui(t0, 0x1234);
+ __ ori(t0, t0, 0);
+ __ ori(t0, t0, 0x0f0f);
+ __ ori(t0, t0, 0xf0f0);
+ __ addiu(t1, t0, 1);
+ __ addiu(t2, t1, -0x10);
+
+ // Load values in temporary registers.
+ __ li(t0, 0x00000004);
+ __ li(t1, 0x00001234);
+ __ li(t2, 0x12345678);
+ __ li(t3, 0x7fffffff);
+ __ li(t4, 0xfffffffc);
+ __ li(t5, 0xffffedcc);
+ __ li(t6, 0xedcba988);
+ __ li(t7, 0x80000000);
+
+ // SPECIAL class.
+ __ srl(v0, t2, 8); // 0x00123456
+ __ sll(v0, v0, 11); // 0x91a2b000
+ __ sra(v0, v0, 3); // 0xf2345600
+ __ srav(v0, v0, t0); // 0xff234560
+ __ sllv(v0, v0, t0); // 0xf2345600
+ __ srlv(v0, v0, t0); // 0x0f234560
+ __ Branch(ne, &error, v0, Operand(0x0f234560));
+ __ nop();
+
+ __ add(v0, t0, t1); // 0x00001238
+ __ sub(v0, v0, t0); // 0x00001234
+ __ Branch(ne, &error, v0, Operand(0x00001234));
+ __ nop();
+ __ addu(v1, t3, t0);
+ __ Branch(ne, &error, v1, Operand(0x80000003));
+ __ nop();
+ __ subu(v1, t7, t0); // 0x7ffffffc
+ __ Branch(ne, &error, v1, Operand(0x7ffffffc));
+ __ nop();
+
+ __ and_(v0, t1, t2); // 0x00001230
+ __ or_(v0, v0, t1); // 0x00001234
+ __ xor_(v0, v0, t2); // 0x1234444c
+ __ nor(v0, v0, t2); // 0xedcba983
+ __ Branch(ne, &error, v0, Operand(0xedcba983));
+ __ nop();
+
+ __ slt(v0, t7, t3);
+ __ Branch(ne, &error, v0, Operand(0x1));
+ __ nop();
+ __ sltu(v0, t7, t3);
+ __ Branch(ne, &error, v0, Operand(0x0));
+ __ nop();
+ // End of SPECIAL class.
+
+ __ addi(v0, zero_reg, 0x7421); // 0x00007421
+ __ addi(v0, v0, -0x1); // 0x00007420
+ __ addiu(v0, v0, -0x20); // 0x00007400
+ __ Branch(ne, &error, v0, Operand(0x00007400));
+ __ nop();
+ __ addiu(v1, t3, 0x1); // 0x80000000
+ __ Branch(ne, &error, v1, Operand(0x80000000));
+ __ nop();
+
+ __ slti(v0, t1, 0x00002000); // 0x1
+ __ slti(v0, v0, 0xffff8000); // 0x0
+ __ Branch(ne, &error, v0, Operand(0x0));
+ __ nop();
+ __ sltiu(v0, t1, 0x00002000); // 0x1
+ __ sltiu(v0, v0, 0x00008000); // 0x1
+ __ Branch(ne, &error, v0, Operand(0x1));
+ __ nop();
+
+ __ andi(v0, t1, 0xf0f0); // 0x00001030
+ __ ori(v0, v0, 0x8a00); // 0x00009a30
+ __ xori(v0, v0, 0x83cc); // 0x000019fc
+ __ Branch(ne, &error, v0, Operand(0x000019fc));
+ __ nop();
+ __ lui(v1, 0x8123); // 0x81230000
+ __ Branch(ne, &error, v1, Operand(0x81230000));
+ __ nop();
+
+ // Everything was correctly executed. Load the expected result.
+ __ li(v0, 0x31415926);
+ __ b(&exit);
+ __ nop();
+
+ __ bind(&error);
+ // Got an error. Fall through to exit with the wrong value left in v0.
+
+ __ bind(&exit);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = Heap::CreateCode(desc,
+ NULL,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Object>(Heap::undefined_value()));
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F2 f = FUNCTION_CAST<F2>(Code::cast(code)->entry());
+ int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
+ ::printf("f() = %d\n", res);
+ CHECK_EQ(0x31415926, res);
+}
+
+#undef __
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 05c29d710..a6b537790 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -115,7 +115,8 @@ static void SetGlobalProperty(const char* name, Object* value) {
static Handle<JSFunction> Compile(const char* source) {
Handle<String> source_code(Factory::NewStringFromUtf8(CStrVector(source)));
Handle<JSFunction> boilerplate =
- Compiler::Compile(source_code, Handle<String>(), 0, 0, NULL, NULL);
+ Compiler::Compile(source_code, Handle<String>(), 0, 0, NULL, NULL,
+ Handle<String>::null());
return Factory::NewFunctionFromBoilerplate(boilerplate,
Top::global_context());
}
@@ -317,3 +318,27 @@ TEST(Regression236) {
CHECK_EQ(-1, GetScriptLineNumber(script, 100));
CHECK_EQ(-1, GetScriptLineNumber(script, -1));
}
+
+
+TEST(GetScriptLineNumber) {
+ LocalContext env;
+ v8::HandleScope scope;
+ v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"));
+ const char function_f[] = "function f() {}";
+ const int max_rows = 1000;
+ const int buffer_size = max_rows + sizeof(function_f);
+ ScopedVector<char> buffer(buffer_size);
+ memset(buffer.start(), '\n', buffer_size - 1);
+ buffer[buffer_size - 1] = '\0';
+
+ for (int i = 0; i < max_rows; ++i) {
+ if (i > 0)
+ buffer[i - 1] = '\n';
+ memcpy(&buffer[i], function_f, sizeof(function_f) - 1);
+ v8::Handle<v8::String> script_body = v8::String::New(buffer.start());
+ v8::Script::Compile(script_body, &origin)->Run();
+ v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("f")));
+ CHECK_EQ(i, f->GetScriptLineNumber());
+ }
+}
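+// Buffer layout: it starts as max_rows newlines; each iteration overwrites
+// offset i with "function f() {}" and restores the newline clobbered at
+// i - 1 by the previous copy, so f is declared after exactly i newlines and
+// must report script line i.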
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 92e18e068..b7c39b226 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -550,6 +550,15 @@ const char* frame_script_data_source =
v8::Local<v8::Function> frame_script_data;
+// Source for the JavaScript function which picks out the script data from
+// the AfterCompile event.
+const char* compiled_script_data_source =
+ "function compiled_script_data(event_data) {"
+ " return event_data.script().data();"
+ "}";
+v8::Local<v8::Function> compiled_script_data;
+
+
// Source for The JavaScript function which returns the number of frames.
static const char* frame_count_source =
"function frame_count(exec_state) {"
@@ -647,6 +656,19 @@ static void DebugEventBreakPointHitCount(v8::DebugEvent event,
script_data->WriteAscii(last_script_data_hit);
}
}
+ } else if (event == v8::AfterCompile && !compiled_script_data.IsEmpty()) {
+ const int argc = 1;
+ v8::Handle<v8::Value> argv[argc] = { event_data };
+ v8::Handle<v8::Value> result = compiled_script_data->Call(exec_state,
+ argc, argv);
+ if (result->IsUndefined()) {
+ last_script_data_hit[0] = '\0';
+ } else {
+ result = result->ToString();
+ CHECK(result->IsString());
+ v8::Handle<v8::String> script_data(result->ToString());
+ script_data->WriteAscii(last_script_data_hit);
+ }
}
}
@@ -3884,6 +3906,11 @@ bool IsEvaluateResponseMessage(char* message) {
}
+static int StringToInt(const char* s) {
+ return atoi(s); // NOLINT
+}
+
+
// We match parts of the message to get evaluate result int value.
int GetEvaluateIntResult(char *message) {
const char* value = "\"value\":";
@@ -3892,7 +3919,7 @@ int GetEvaluateIntResult(char *message) {
return -1;
}
int res = -1;
- res = atoi(pos + strlen(value));
+ res = StringToInt(pos + strlen(value));
return res;
}
@@ -3905,7 +3932,7 @@ int GetBreakpointIdFromBreakEventMessage(char *message) {
return -1;
}
int res = -1;
- res = atoi(pos + strlen(breakpoints));
+ res = StringToInt(pos + strlen(breakpoints));
return res;
}
@@ -3918,11 +3945,7 @@ int GetTotalFramesInt(char *message) {
return -1;
}
pos += strlen(prefix);
- char* pos_end = pos;
- int res = static_cast<int>(strtol(pos, &pos_end, 10));
- if (pos_end == pos) {
- return -1;
- }
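+ // Note: unlike the strtol path removed above, atoi cannot signal failure;
+ // an unparsable prefix now yields 0 rather than -1.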
+ int res = StringToInt(pos);
return res;
}
@@ -5231,6 +5254,9 @@ TEST(ScriptNameAndData) {
frame_script_data = CompileFunction(&env,
frame_script_data_source,
"frame_script_data");
+ compiled_script_data = CompileFunction(&env,
+ compiled_script_data_source,
+ "compiled_script_data");
v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount,
v8::Undefined());
@@ -5277,6 +5303,16 @@ TEST(ScriptNameAndData) {
CHECK_EQ(3, break_point_hit_count);
CHECK_EQ("new name", last_script_name_hit);
CHECK_EQ("abc 123", last_script_data_hit);
+
+ v8::Handle<v8::Script> script3 =
+ v8::Script::Compile(script, &origin2, NULL,
+ v8::String::New("in compile"));
+ CHECK_EQ("in compile", last_script_data_hit);
+ script3->Run();
+ f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f->Call(env->Global(), 0, NULL);
+ CHECK_EQ(4, break_point_hit_count);
+ CHECK_EQ("in compile", last_script_data_hit);
}
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index eca2c2b67..9853af324 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -170,21 +170,110 @@ static void SigProfSignalHandler(int signal, siginfo_t* info, void* context) {
#endif // __linux__
-static int CheckThatProfilerWorks(int log_pos) {
- Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU);
+namespace {
+
+class ScopedLoggerInitializer {
+ public:
+ explicit ScopedLoggerInitializer(bool log, bool prof_lazy)
+ : saved_log_(i::FLAG_log),
+ saved_prof_lazy_(i::FLAG_prof_lazy),
+ saved_prof_(i::FLAG_prof),
+ saved_prof_auto_(i::FLAG_prof_auto),
+ trick_to_run_init_flags_(init_flags_(log, prof_lazy)),
+ need_to_set_up_logger_(i::V8::IsRunning()),
+ scope_(),
+ env_(v8::Context::New()) {
+ if (need_to_set_up_logger_) Logger::Setup();
+ env_->Enter();
+ }
+
+ ~ScopedLoggerInitializer() {
+ env_->Exit();
+ Logger::TearDown();
+ i::FLAG_prof_lazy = saved_prof_lazy_;
+ i::FLAG_prof = saved_prof_;
+ i::FLAG_prof_auto = saved_prof_auto_;
+ i::FLAG_log = saved_log_;
+ }
+
+ v8::Handle<v8::Context>& env() { return env_; }
+
+ private:
+ static bool init_flags_(bool log, bool prof_lazy) {
+ i::FLAG_log = log;
+ i::FLAG_prof = true;
+ i::FLAG_prof_lazy = prof_lazy;
+ i::FLAG_prof_auto = false;
+ i::FLAG_logfile = "*";
+ return prof_lazy;
+ }
+
+ const bool saved_log_;
+ const bool saved_prof_lazy_;
+ const bool saved_prof_;
+ const bool saved_prof_auto_;
+ const bool trick_to_run_init_flags_;
+ const bool need_to_set_up_logger_;
+ v8::HandleScope scope_;
+ v8::Handle<v8::Context> env_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedLoggerInitializer);
+};
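+
+// Note on the class above: members initialize in declaration order, so
+// trick_to_run_init_flags_ exists solely to run init_flags_() - and thus
+// set the FLAG_* values - before the HandleScope and context are created;
+// the destructor then tears the logger down and restores the saved flags.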
+
+
+class LogBufferMatcher {
+ public:
+ LogBufferMatcher() {
+ // Skip all initially logged stuff.
+ log_pos_ = GetLogLines(0, &buffer_);
+ }
+
+ int log_pos() { return log_pos_; }
+
+ int GetNextChunk() {
+ int chunk_size = GetLogLines(log_pos_, &buffer_);
+ CHECK_GT(buffer_.length(), chunk_size);
+ buffer_[chunk_size] = '\0';
+ log_pos_ += chunk_size;
+ return chunk_size;
+ }
+
+ const char* Find(const char* substr) {
+ return strstr(buffer_.start(), substr);
+ }
+
+ const char* Find(const i::Vector<char>& substr) {
+ return Find(substr.start());
+ }
+
+ bool IsInSequence(const char* s1, const char* s2) {
+ const char* s1_pos = Find(s1);
+ const char* s2_pos = Find(s2);
+ CHECK_NE(NULL, s1_pos);
+ CHECK_NE(NULL, s2_pos);
+ return s1_pos < s2_pos;
+ }
+
+ void PrintBuffer() {
+ puts(buffer_.start());
+ }
+
+ private:
+ EmbeddedVector<char, 102400> buffer_;
+ int log_pos_;
+};
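+
+// Typical use, mirroring the tests below: construct a matcher to swallow
+// everything logged so far, perform an action, then pull the next chunk
+// and grep it:
+//
+//   LogBufferMatcher matcher;
+//   CompileAndRunScript("...");
+//   CHECK_GT(matcher.GetNextChunk(), 0);
+//   CHECK_NE(NULL, matcher.Find("\ncode-creation,"));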
+
+} // namespace
+
+
+static void CheckThatProfilerWorks(LogBufferMatcher* matcher) {
+ Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 0);
CHECK(LoggerTestHelper::IsSamplerActive());
// Verify that the current map of compiled functions has been logged.
- EmbeddedVector<char, 102400> buffer;
- int map_log_size = GetLogLines(log_pos, &buffer);
- printf("map_log_size: %d\n", map_log_size);
- CHECK_GT(map_log_size, 0);
- CHECK_GT(buffer.length(), map_log_size);
- log_pos += map_log_size;
- // Check buffer contents.
- buffer[map_log_size] = '\0';
+ CHECK_GT(matcher->GetNextChunk(), 0);
const char* code_creation = "\ncode-creation,"; // eq. to /^code-creation,/
- CHECK_NE(NULL, strstr(buffer.start(), code_creation));
+ CHECK_NE(NULL, matcher->Find(code_creation));
#ifdef __linux__
// Intercept SIGPROF handler to make sure that the test process
@@ -204,7 +293,7 @@ static int CheckThatProfilerWorks(int log_pos) {
i::OS::SNPrintF(script_src,
"function f%d(x) { return %d * x; }"
"for (var i = 0; i < 10000; ++i) { f%d(i); }",
- log_pos, log_pos, log_pos);
+ matcher->log_pos(), matcher->log_pos(), matcher->log_pos());
// Run code for 200 msecs to get some ticks.
const double end_time = i::OS::TimeCurrentMillis() + 200;
while (i::OS::TimeCurrentMillis() < end_time) {
@@ -213,7 +302,7 @@ static int CheckThatProfilerWorks(int log_pos) {
i::OS::Sleep(1);
}
- Logger::PauseProfiler(v8::PROFILER_MODULE_CPU);
+ Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 0);
CHECK(!LoggerTestHelper::IsSamplerActive());
// Wait 50 msecs to allow Profiler thread to process the last
@@ -221,68 +310,39 @@ static int CheckThatProfilerWorks(int log_pos) {
i::OS::Sleep(50);
// Now we must have compiler and tick records.
- int log_size = GetLogLines(log_pos, &buffer);
- printf("log_size: %d\n", log_size);
- CHECK_GT(log_size, 0);
- CHECK_GT(buffer.length(), log_size);
- log_pos += log_size;
- // Check buffer contents.
- buffer[log_size] = '\0';
- printf("%s", buffer.start());
+ CHECK_GT(matcher->GetNextChunk(), 0);
+ matcher->PrintBuffer();
+ CHECK_NE(NULL, matcher->Find(code_creation));
const char* tick = "\ntick,";
- CHECK_NE(NULL, strstr(buffer.start(), code_creation));
- const bool ticks_found = strstr(buffer.start(), tick) != NULL;
+ const bool ticks_found = matcher->Find(tick) != NULL;
CHECK_EQ(was_sigprof_received, ticks_found);
-
- return log_pos;
}
TEST(ProfLazyMode) {
- const bool saved_prof_lazy = i::FLAG_prof_lazy;
- const bool saved_prof = i::FLAG_prof;
- const bool saved_prof_auto = i::FLAG_prof_auto;
- i::FLAG_prof = true;
- i::FLAG_prof_lazy = true;
- i::FLAG_prof_auto = false;
- i::FLAG_logfile = "*";
-
- // If tests are being run manually, V8 will be already initialized
- // by the bottom test.
- const bool need_to_set_up_logger = i::V8::IsRunning();
- v8::HandleScope scope;
- v8::Handle<v8::Context> env = v8::Context::New();
- if (need_to_set_up_logger) Logger::Setup();
- env->Enter();
+ ScopedLoggerInitializer initialize_logger(false, true);
// No sampling should happen prior to resuming profiler.
CHECK(!LoggerTestHelper::IsSamplerActive());
- EmbeddedVector<char, 102400> buffer;
+ LogBufferMatcher matcher;
// Nothing must be logged until profiling is resumed.
- int log_pos = GetLogLines(0, &buffer);
- CHECK_EQ(0, log_pos);
+ CHECK_EQ(0, matcher.log_pos());
CompileAndRunScript("var a = (function(x) { return x + 1; })(10);");
// Nothing must be logged while profiling is suspended.
- CHECK_EQ(0, GetLogLines(log_pos, &buffer));
+ CHECK_EQ(0, matcher.GetNextChunk());
- log_pos = CheckThatProfilerWorks(log_pos);
+ CheckThatProfilerWorks(&matcher);
CompileAndRunScript("var a = (function(x) { return x + 1; })(10);");
// No new data beyond last retrieved position.
- CHECK_EQ(0, GetLogLines(log_pos, &buffer));
+ CHECK_EQ(0, matcher.GetNextChunk());
// Check that profiling can be resumed again.
- CheckThatProfilerWorks(log_pos);
-
- env->Exit();
- Logger::TearDown();
- i::FLAG_prof_lazy = saved_prof_lazy;
- i::FLAG_prof = saved_prof;
- i::FLAG_prof_auto = saved_prof_auto;
+ CheckThatProfilerWorks(&matcher);
}
@@ -480,25 +540,8 @@ static v8::Handle<v8::Value> ObjMethod1(const v8::Arguments& args) {
}
TEST(LogCallbacks) {
- const bool saved_prof_lazy = i::FLAG_prof_lazy;
- const bool saved_prof = i::FLAG_prof;
- const bool saved_prof_auto = i::FLAG_prof_auto;
- i::FLAG_prof = true;
- i::FLAG_prof_lazy = false;
- i::FLAG_prof_auto = false;
- i::FLAG_logfile = "*";
-
- // If tests are being run manually, V8 will be already initialized
- // by the bottom test.
- const bool need_to_set_up_logger = i::V8::IsRunning();
- v8::HandleScope scope;
- v8::Handle<v8::Context> env = v8::Context::New();
- if (need_to_set_up_logger) Logger::Setup();
- env->Enter();
-
- // Skip all initially logged stuff.
- EmbeddedVector<char, 102400> buffer;
- int log_pos = GetLogLines(0, &buffer);
+ ScopedLoggerInitializer initialize_logger(false, false);
+ LogBufferMatcher matcher;
v8::Persistent<v8::FunctionTemplate> obj =
v8::Persistent<v8::FunctionTemplate>::New(v8::FunctionTemplate::New());
@@ -511,16 +554,14 @@ TEST(LogCallbacks) {
signature),
static_cast<v8::PropertyAttribute>(v8::DontDelete));
- env->Global()->Set(v8_str("Obj"), obj->GetFunction());
+ initialize_logger.env()->Global()->Set(v8_str("Obj"), obj->GetFunction());
CompileAndRunScript("Obj.prototype.method1.toString();");
i::Logger::LogCompiledFunctions();
- log_pos = GetLogLines(log_pos, &buffer);
- CHECK_GT(log_pos, 0);
- buffer[log_pos] = 0;
+ CHECK_GT(matcher.GetNextChunk(), 0);
const char* callback_rec = "code-creation,Callback,";
- char* pos = strstr(buffer.start(), callback_rec);
+ char* pos = const_cast<char*>(matcher.Find(callback_rec));
CHECK_NE(NULL, pos);
pos += strlen(callback_rec);
EmbeddedVector<char, 100> ref_data;
@@ -530,12 +571,6 @@ TEST(LogCallbacks) {
CHECK_EQ(ref_data.start(), pos);
obj.Dispose();
-
- env->Exit();
- Logger::TearDown();
- i::FLAG_prof_lazy = saved_prof_lazy;
- i::FLAG_prof = saved_prof;
- i::FLAG_prof_auto = saved_prof_auto;
}
@@ -555,25 +590,8 @@ static v8::Handle<v8::Value> Prop2Getter(v8::Local<v8::String> property,
}
TEST(LogAccessorCallbacks) {
- const bool saved_prof_lazy = i::FLAG_prof_lazy;
- const bool saved_prof = i::FLAG_prof;
- const bool saved_prof_auto = i::FLAG_prof_auto;
- i::FLAG_prof = true;
- i::FLAG_prof_lazy = false;
- i::FLAG_prof_auto = false;
- i::FLAG_logfile = "*";
-
- // If tests are being run manually, V8 will be already initialized
- // by the bottom test.
- const bool need_to_set_up_logger = i::V8::IsRunning();
- v8::HandleScope scope;
- v8::Handle<v8::Context> env = v8::Context::New();
- if (need_to_set_up_logger) Logger::Setup();
- env->Enter();
-
- // Skip all initially logged stuff.
- EmbeddedVector<char, 102400> buffer;
- int log_pos = GetLogLines(0, &buffer);
+ ScopedLoggerInitializer initialize_logger(false, false);
+ LogBufferMatcher matcher;
v8::Persistent<v8::FunctionTemplate> obj =
v8::Persistent<v8::FunctionTemplate>::New(v8::FunctionTemplate::New());
@@ -583,34 +601,112 @@ TEST(LogAccessorCallbacks) {
inst->SetAccessor(v8::String::New("prop2"), Prop2Getter);
i::Logger::LogAccessorCallbacks();
- log_pos = GetLogLines(log_pos, &buffer);
- CHECK_GT(log_pos, 0);
- buffer[log_pos] = 0;
- printf("%s", buffer.start());
+ CHECK_GT(matcher.GetNextChunk(), 0);
+ matcher.PrintBuffer();
EmbeddedVector<char, 100> prop1_getter_record;
i::OS::SNPrintF(prop1_getter_record,
"code-creation,Callback,0x%" V8PRIxPTR ",1,\"get prop1\"",
Prop1Getter);
- CHECK_NE(NULL, strstr(buffer.start(), prop1_getter_record.start()));
+ CHECK_NE(NULL, matcher.Find(prop1_getter_record));
EmbeddedVector<char, 100> prop1_setter_record;
i::OS::SNPrintF(prop1_setter_record,
"code-creation,Callback,0x%" V8PRIxPTR ",1,\"set prop1\"",
Prop1Setter);
- CHECK_NE(NULL, strstr(buffer.start(), prop1_setter_record.start()));
+ CHECK_NE(NULL, matcher.Find(prop1_setter_record));
EmbeddedVector<char, 100> prop2_getter_record;
i::OS::SNPrintF(prop2_getter_record,
"code-creation,Callback,0x%" V8PRIxPTR ",1,\"get prop2\"",
Prop2Getter);
- CHECK_NE(NULL, strstr(buffer.start(), prop2_getter_record.start()));
+ CHECK_NE(NULL, matcher.Find(prop2_getter_record));
obj.Dispose();
+}
- env->Exit();
- Logger::TearDown();
- i::FLAG_prof_lazy = saved_prof_lazy;
- i::FLAG_prof = saved_prof;
- i::FLAG_prof_auto = saved_prof_auto;
+
+TEST(LogTags) {
+ ScopedLoggerInitializer initialize_logger(true, false);
+ LogBufferMatcher matcher;
+
+ const char* open_tag = "open-tag,";
+ const char* close_tag = "close-tag,";
+
+ // Check compatibility with the old-style behavior.
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
+ Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 0);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
+ Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 0);
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
+ CHECK_EQ(NULL, matcher.Find(open_tag));
+ CHECK_EQ(NULL, matcher.Find(close_tag));
+
+ const char* open_tag1 = "open-tag,1\n";
+ const char* close_tag1 = "close-tag,1\n";
+
+ // Check non-nested tag case.
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
+ Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
+ Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
+ CHECK_GT(matcher.GetNextChunk(), 0);
+ CHECK(matcher.IsInSequence(open_tag1, close_tag1));
+
+ const char* open_tag2 = "open-tag,2\n";
+ const char* close_tag2 = "close-tag,2\n";
+
+ // Check nested tags case.
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
+ Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
+ Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 2);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
+ Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 2);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
+ Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
+ CHECK_GT(matcher.GetNextChunk(), 0);
+ // open_tag1 < open_tag2 < close_tag2 < close_tag1
+ CHECK(matcher.IsInSequence(open_tag1, open_tag2));
+ CHECK(matcher.IsInSequence(open_tag2, close_tag2));
+ CHECK(matcher.IsInSequence(close_tag2, close_tag1));
+
+ // Check overlapped tags case.
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
+ Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
+ Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 2);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
+ Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
+ Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 2);
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
+ CHECK_GT(matcher.GetNextChunk(), 0);
+ // open_tag1 < open_tag2 < close_tag1 < close_tag2
+ CHECK(matcher.IsInSequence(open_tag1, open_tag2));
+ CHECK(matcher.IsInSequence(open_tag2, close_tag1));
+ CHECK(matcher.IsInSequence(close_tag1, close_tag2));
+
+ const char* open_tag3 = "open-tag,3\n";
+ const char* close_tag3 = "close-tag,3\n";
+
+ // Check pausing overflow case.
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
+ Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
+ Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 2);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
+ Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 2);
+ CHECK_EQ(v8::PROFILER_MODULE_CPU, Logger::GetActiveProfilerModules());
+ Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 1);
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
+ Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 3);
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
+ Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 3);
+ CHECK_EQ(v8::PROFILER_MODULE_NONE, Logger::GetActiveProfilerModules());
+ // There must be no tags, because logging is disabled.
+ CHECK_EQ(NULL, matcher.Find(open_tag3));
+ CHECK_EQ(NULL, matcher.Find(close_tag3));
}
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index b1ca45aaa..db312da70 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -653,6 +653,8 @@ typedef RegExpMacroAssemblerIA32 ArchRegExpMacroAssembler;
typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_ARM
typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
+#elif V8_TARGET_ARCH_MIPS
+typedef RegExpMacroAssembler ArchRegExpMacroAssembler;
#endif
class ContextInitializer {
diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc
index 1d65e686e..24b3c908a 100644
--- a/deps/v8/test/cctest/test-utils.cc
+++ b/deps/v8/test/cctest/test-utils.cc
@@ -35,111 +35,6 @@
using namespace v8::internal;
-enum Mode {
- forward,
- backward_unsigned
-};
-
-
-static v8::internal::byte* Write(v8::internal::byte* p, Mode m, int x) {
- v8::internal::byte* q = NULL;
- switch (m) {
- case forward:
- q = EncodeInt(p, x);
- CHECK(q <= p + sizeof(x) + 1);
- break;
- case backward_unsigned:
- q = EncodeUnsignedIntBackward(p, x);
- CHECK(q >= p - sizeof(x) - 1);
- break;
- }
- return q;
-}
-
-
-static v8::internal::byte* Read(v8::internal::byte* p, Mode m, int x) {
- v8::internal::byte* q = NULL;
- int y;
- switch (m) {
- case forward:
- q = DecodeInt(p, &y);
- CHECK(q <= p + sizeof(y) + 1);
- break;
- case backward_unsigned: {
- unsigned int uy;
- q = DecodeUnsignedIntBackward(p, &uy);
- y = uy;
- CHECK(q >= p - sizeof(uy) - 1);
- break;
- }
- }
- CHECK(y == x);
- return q;
-}
-
-
-static v8::internal::byte* WriteMany(v8::internal::byte* p, Mode m, int x) {
- p = Write(p, m, x - 7);
- p = Write(p, m, x - 1);
- p = Write(p, m, x);
- p = Write(p, m, x + 1);
- p = Write(p, m, x + 2);
- p = Write(p, m, -x - 5);
- p = Write(p, m, -x - 1);
- p = Write(p, m, -x);
- p = Write(p, m, -x + 1);
- p = Write(p, m, -x + 3);
-
- return p;
-}
-
-
-static v8::internal::byte* ReadMany(v8::internal::byte* p, Mode m, int x) {
- p = Read(p, m, x - 7);
- p = Read(p, m, x - 1);
- p = Read(p, m, x);
- p = Read(p, m, x + 1);
- p = Read(p, m, x + 2);
- p = Read(p, m, -x - 5);
- p = Read(p, m, -x - 1);
- p = Read(p, m, -x);
- p = Read(p, m, -x + 1);
- p = Read(p, m, -x + 3);
-
- return p;
-}
-
-
-void ProcessValues(int* values, int n, Mode m) {
- v8::internal::byte buf[4 * KB]; // make this big enough
- v8::internal::byte* p0 = (m == forward ? buf : buf + ARRAY_SIZE(buf));
-
- v8::internal::byte* p = p0;
- for (int i = 0; i < n; i++) {
- p = WriteMany(p, m, values[i]);
- }
-
- v8::internal::byte* q = p0;
- for (int i = 0; i < n; i++) {
- q = ReadMany(q, m, values[i]);
- }
-
- CHECK(p == q);
-}
-
-
-TEST(Utils0) {
- int values[] = {
- 0, 1, 10, 16, 32, 64, 128, 256, 512, 1024, 1234, 5731,
- 10000, 100000, 1000000, 10000000, 100000000, 1000000000
- };
- const int n = ARRAY_SIZE(values);
-
- ProcessValues(values, n, forward);
- ProcessValues(values, n, backward_unsigned);
-}
-
-
TEST(Utils1) {
CHECK_EQ(-1000000, FastD2I(-1000000.0));
CHECK_EQ(-1, FastD2I(-1.0));
diff --git a/deps/v8/test/es5conform/es5conform.status b/deps/v8/test/es5conform/es5conform.status
index a755016e7..a3f137fcb 100644
--- a/deps/v8/test/es5conform/es5conform.status
+++ b/deps/v8/test/es5conform/es5conform.status
@@ -39,8 +39,6 @@ chapter14: UNIMPLEMENTED
chapter15/15.1: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.1: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.5: UNIMPLEMENTED
-chapter15/15.2/15.2.3/15.2.3.6: UNIMPLEMENTED
-chapter15/15.2/15.2.3/15.2.3.7: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.8: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.9: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.10: UNIMPLEMENTED
@@ -48,24 +46,6 @@ chapter15/15.2/15.2.3/15.2.3.11: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.12: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.13: UNIMPLEMENTED
-# Object.getPrototypeOf
-chapter15/15.2/15.2.3/15.2.3.2: PASS
-
-# Object.getOwnPropertyDescriptor
-chapter15/15.2/15.2.3/15.2.3.3: PASS
-
-# NOT IMPLEMENTED: defineProperty
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-3: FAIL_OK
-
-# NOT IMPLEMENTED: getOwnPropertyNames
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-16: FAIL_OK
-
-# NOT IMPLEMENTED: defineProperty
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-18: FAIL_OK
-
-# NOT IMPLEMENTED: defineProperties
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-19: FAIL_OK
-
# NOT IMPLEMENTED: seal
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-20: FAIL_OK
@@ -87,37 +67,24 @@ chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-25: FAIL_OK
# NOT IMPLEMENTED: bind
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-38: FAIL_OK
-# Built-ins have wrong descriptor (should all be false)
+# NaN is writable
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-178: FAIL_OK
+# Infinity is writable
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-179: FAIL_OK
+# undefined is writable
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-180: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-182: FAIL_OK
# Our Function object has an "arguments" property which is used as a
# non-property in the test
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-183: FAIL_OK
-
# Our Function object has a "caller" property which is used as a
# non-property in the test
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-184: FAIL_OK
-# Built-ins have wrong descriptor (should all be false)
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-185: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-186: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-187: FAIL_OK
+# Our Function object has a "name" property which is used as a
+# non-property in the test
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-188: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-189: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-190: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-191: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-192: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-193: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-194: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-195: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-201: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-210: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-211: FAIL_OK
-
# NOT IMPLEMENTED: RegExp.prototype.source
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-212: FAIL_OK
@@ -131,18 +98,6 @@ chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-214: FAIL_OK
# NOT IMPLEMENTED: RegExp.prototype.multiline
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-215: FAIL_OK
-# Errors have wrong descriptor (should all be false)
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-216: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-217: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-218: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-219: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-220: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-221: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-222: FAIL_OK
-
-# Object.getOwnPropertyNames
-chapter15/15.2/15.2.3/15.2.3.4: PASS
-
# All of the tests below marked SUBSETFAIL (in 15.2.3.4) fail because
# the tests assumes that objects can not have more properties
# than those described in the spec - but according to spec they can
@@ -252,12 +207,9 @@ chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-b-1: FAIL_OK
-# Object.keys
-chapter15/15.2/15.2.3/15.2.3.14: PASS
-
# We fail this because Object.keys returns numbers for element indices
# rather than strings.
-chapter15/15.2/15.2.3/15.2.3.14/15.2.3.14-3-3: FAIL_OK
+#chapter15/15.2/15.2.3/15.2.3.14/15.2.3.14-3-3: FAIL_OK
chapter15/15.3: UNIMPLEMENTED
@@ -267,9 +219,6 @@ chapter15/15.4/15.4.4/15.4.4.20: UNIMPLEMENTED
chapter15/15.4/15.4.4/15.4.4.21: UNIMPLEMENTED
chapter15/15.4/15.4.4/15.4.4.22: UNIMPLEMENTED
-# Array.prototype.every
-chapter15/15.4/15.4.4/15.4.4.16: PASS
-
# Wrong test - because this is not given as argument to arr.every
# this._15_4_4_16_5_1 evaluates to undefined
chapter15/15.4/15.4.4/15.4.4.16/15.4.4.16-5-1: FAIL_OK
@@ -285,10 +234,6 @@ chapter15/15.4/15.4.4/15.4.4.16/15.4.4.16-7-7: FAIL_OK
# if (val>1) in test should be if (val>2)
chapter15/15.4/15.4.4/15.4.4.16/15.4.4.16-8-10: FAIL_OK
-
-# Array.prototype.some
-chapter15/15.4/15.4.4/15.4.4.17: PASS
-
# Wrong assumption - according to spec some returns a Boolean, not a number
chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-4-9: FAIL_OK
@@ -304,20 +249,12 @@ chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-7-7: FAIL_OK
# Same as 15.4.4.16-10-8
chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-8-10: FAIL_OK
-
-# Array.prototype.forEach
-chapter15/15.4/15.4.4/15.4.4.18: PASS
-
# Same as 15.4.4.16-5-1
chapter15/15.4/15.4.4/15.4.4.18/15.4.4.18-5-1: FAIL_OK
# Same as 15.4.4.16-7-7
chapter15/15.4/15.4.4/15.4.4.18/15.4.4.18-7-6: FAIL_OK
-
-# Array.prototype.map
-chapter15/15.4/15.4.4/15.4.4.19: PASS
-
# Same as 15.4.4.16-5-1
chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-5-1: FAIL_OK
@@ -334,3 +271,8 @@ chapter15/15.7: UNIMPLEMENTED
chapter15/15.9: UNIMPLEMENTED
chapter15/15.10: UNIMPLEMENTED
chapter15/15.12: UNIMPLEMENTED
+
+[ $arch == mips ]
+
+# Skip all tests on MIPS.
+*: SKIP
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index fc2896b1c..c4a384275 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -29,3 +29,8 @@ prefix message
# All tests in the bug directory are expected to fail.
bugs: FAIL
+
+[ $arch == mips ]
+
+# Skip all tests on MIPS.
+*: SKIP
diff --git a/deps/v8/test/mjsunit/array-functions-prototype-misc.js b/deps/v8/test/mjsunit/array-functions-prototype-misc.js
new file mode 100644
index 000000000..0543c323b
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-functions-prototype-misc.js
@@ -0,0 +1,314 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/**
+ * @fileoverview Test splice, shift, unshift, slice and join on small
+ * and large arrays. Some of these methods are specified such that they
+ * should work on other objects too, so we test that too.
+ */
+
+var LARGE = 40000000;
+var VERYLARGE = 4000000000;
+
+// Nicer for firefox 1.5. Unless you uncomment the following two lines,
+// smjs will appear to hang on this file.
+//var LARGE = 40000;
+//var VERYLARGE = 40000;
+
+var fourhundredth = LARGE/400;
+
+function PseudoArray() {
+};
+
+for (var use_real_arrays = 0; use_real_arrays <= 1; use_real_arrays++) {
+ var poses = [0, 140, 20000, VERYLARGE];
+ var the_prototype;
+ var new_function;
+ var push_function;
+ var concat_function;
+ var slice_function;
+ var splice_function;
+ var splice_function_2;
+ var unshift_function;
+ var unshift_function_2;
+ var shift_function;
+ if (use_real_arrays) {
+ new_function = function(length) {
+ return new Array(length);
+ };
+ the_prototype = Array.prototype;
+ push_function = function(array, elt) {
+ return array.push(elt);
+ };
+ concat_function = function(array, other) {
+ return array.concat(other);
+ };
+ slice_function = function(array, start, len) {
+ return array.slice(start, len);
+ };
+ splice_function = function(array, start, len) {
+ return array.splice(start, len);
+ };
+ splice_function_2 = function(array, start, len, elt) {
+ return array.splice(start, len, elt);
+ };
+ unshift_function = function(array, elt) {
+ return array.unshift(elt);
+ };
+ unshift_function_2 = function(array, elt1, elt2) {
+ return array.unshift(elt1, elt2);
+ };
+ shift_function = function(array) {
+ return array.shift();
+ };
+ } else {
+ // Don't run largest size on non-arrays or we'll be here for ever.
+ poses.pop();
+ new_function = function(length) {
+ var obj = new PseudoArray();
+ obj.length = length;
+ return obj;
+ };
+ the_prototype = PseudoArray.prototype;
+ push_function = function(array, elt) {
+ array[array.length] = elt;
+ array.length++;
+ };
+ concat_function = function(array, other) {
+ return Array.prototype.concat.call(array, other);
+ };
+ slice_function = function(array, start, len) {
+ return Array.prototype.slice.call(array, start, len);
+ };
+ splice_function = function(array, start, len) {
+ return Array.prototype.splice.call(array, start, len);
+ };
+ splice_function_2 = function(array, start, len, elt) {
+ return Array.prototype.splice.call(array, start, len, elt);
+ };
+ unshift_function = function(array, elt) {
+ return Array.prototype.unshift.call(array, elt);
+ };
+ unshift_function_2 = function(array, elt1, elt2) {
+ return Array.prototype.unshift.call(array, elt1, elt2);
+ };
+ shift_function = function(array) {
+ return Array.prototype.shift.call(array);
+ };
+ }
+
+ for (var pos_pos = 0; pos_pos < poses.length; pos_pos++) {
+ var pos = poses[pos_pos];
+ if (pos > 100) {
+ var a = new_function(pos);
+ assertEquals(pos, a.length);
+ push_function(a, 'foo');
+ assertEquals(pos + 1, a.length);
+ var b = ['bar'];
+ // Delete a huge number of holes.
+ var c = splice_function(a, 10, pos - 20);
+ assertEquals(pos - 20, c.length);
+ assertEquals(21, a.length);
+ }
+
+ // Add a numeric property to the prototype of the array class. This
+ // allows us to test some borderline stuff relative to the standard.
+ the_prototype["" + (pos + 1)] = 'baz';
+
+ if (use_real_arrays) {
+      // It seems quite clear from ECMAScript spec 15.4.4.5: just call Get on
+ // every integer in the range.
+ // IE, Safari get this right.
+ // FF, Opera get this wrong.
+ var a = ['zero', ,'two'];
+ if (pos == 0) {
+ assertEquals("zero,baz,two", a.join(","));
+ }
+
+ // Concat only applies to real arrays, unlike most of the other methods.
+ var a = new_function(pos);
+ push_function(a, "con");
+ assertEquals("con", a[pos]);
+ assertEquals(pos + 1, a.length);
+ var b = new_function(0);
+ push_function(b, "cat");
+ assertEquals("cat", b[0]);
+ var ab = concat_function(a, b);
+ assertEquals("con", ab[pos]);
+ assertEquals(pos + 2, ab.length);
+ assertEquals("cat", ab[pos + 1]);
+ var ba = concat_function(b, a);
+ assertEquals("con", ba[pos + 1]);
+ assertEquals(pos + 2, ba.length);
+ assertEquals("cat", ba[0]);
+
+ // Join with '' as separator.
+ var join = a.join('');
+ assertEquals("con", join);
+ join = b.join('');
+ assertEquals("cat", join);
+ join = ab.join('');
+ assertEquals("concat", join);
+ join = ba.join('');
+ assertEquals("catcon", join);
+
+ var sparse = [];
+ sparse[pos + 1000] = 'is ';
+ sparse[pos + 271828] = 'time ';
+ sparse[pos + 31415] = 'the ';
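+      // Note: despite the leading zero, 012260199 is not an octal literal;
+      // the 9s force it to parse as the decimal value 12260199.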
+ sparse[pos + 012260199] = 'all ';
+ sparse[-1] = 'foo';
+ sparse[pos + 22591927] = 'good ';
+ sparse[pos + 1618033] = 'for ';
+ sparse[pos + 91] = ': Now ';
+ sparse[pos + 86720199] = 'men.';
+ sparse.hest = 'fisk';
+
+ assertEquals("baz: Now is the time for all good men.", sparse.join(''));
+ }
+
+ a = new_function(pos);
+ push_function(a, 'zero');
+ push_function(a, void 0);
+ push_function(a, 'two');
+
+ // Splice works differently from join.
+ // IE, Safari get this wrong.
+ // FF, Opera get this right.
+ // 15.4.4.12 line 24 says the object itself has to have the property...
+ var zero = splice_function(a, pos, 1);
+ assertEquals("undefined", typeof(a[pos]));
+ assertEquals("two", a[pos+1], "pos1:" + pos);
+ assertEquals(pos + 2, a.length, "a length");
+ assertEquals(1, zero.length, "zero length");
+ assertEquals("zero", zero[0]);
+
+ // 15.4.4.12 line 41 says the object itself has to have the property...
+ a = new_function(pos);
+ push_function(a, 'zero');
+ push_function(a, void 0);
+ push_function(a, 'two');
+ var nothing = splice_function_2(a, pos, 0, 'minus1');
+ assertEquals("minus1", a[pos]);
+ assertEquals("zero", a[pos+1]);
+ assertEquals("undefined", typeof(a[pos+2]), "toot!");
+ assertEquals("two", a[pos+3], "pos3");
+ assertEquals(pos + 4, a.length);
+ assertEquals(1, zero.length);
+ assertEquals("zero", zero[0]);
+
+ // 15.4.4.12 line 10 says the object itself has to have the property...
+ a = new_function(pos);
+ push_function(a, 'zero');
+ push_function(a, void 0);
+ push_function(a, 'two');
+ var one = splice_function(a, pos + 1, 1);
+ assertEquals("", one.join(","));
+ assertEquals(pos + 2, a.length);
+ assertEquals("zero", a[pos]);
+ assertEquals("two", a[pos+1]);
+
+ // Set things back to the way they were.
+ the_prototype[pos + 1] = undefined;
+
+ // Unshift.
+ var a = new_function(pos);
+ push_function(a, "foo");
+ assertEquals("foo", a[pos]);
+ assertEquals(pos + 1, a.length);
+ unshift_function(a, "bar");
+ assertEquals("foo", a[pos+1]);
+ assertEquals(pos + 2, a.length);
+ assertEquals("bar", a[0]);
+ unshift_function_2(a, "baz", "boo");
+ assertEquals("foo", a[pos+3]);
+ assertEquals(pos + 4, a.length);
+ assertEquals("baz", a[0]);
+ assertEquals("boo", a[1]);
+ assertEquals("bar", a[2]);
+
+ // Shift.
+ var baz = shift_function(a);
+ assertEquals("baz", baz);
+ assertEquals("boo", a[0]);
+ assertEquals(pos + 3, a.length);
+ assertEquals("foo", a[pos + 2]);
+
+ // Slice.
+ var bar = slice_function(a, 1, 0); // don't throw an exception please.
+ bar = slice_function(a, 1, 2);
+ assertEquals("bar", bar[0]);
+ assertEquals(1, bar.length);
+ assertEquals("bar", a[1]);
+
+ }
+}
+
+// Lets see if performance is reasonable.
+
+var a = new Array(LARGE + 10);
+for (var i = 0; i < a.length; i += 1000) {
+ a[i] = i;
+}
+
+// Take something near the end of the array.
+for (var i = 0; i < 100; i++) {
+ var top = a.splice(LARGE, 5);
+ assertEquals(5, top.length);
+ assertEquals(LARGE, top[0]);
+ assertEquals("undefined", typeof(top[1]));
+ assertEquals(LARGE + 5, a.length);
+ a.splice(LARGE, 0, LARGE);
+ a.length = LARGE + 10;
+}
+
+var a = new Array(LARGE + 10);
+for (var i = 0; i < a.length; i += fourhundredth) {
+ a[i] = i;
+}
+
+// Take something near the middle of the array.
+for (var i = 0; i < 10; i++) {
+ var top = a.splice(LARGE >> 1, 5);
+ assertEquals(5, top.length);
+ assertEquals(LARGE >> 1, top[0]);
+ assertEquals("undefined", typeof(top[1]));
+ assertEquals(LARGE + 5, a.length);
+ a.splice(LARGE >> 1, 0, LARGE >> 1, void 0, void 0, void 0, void 0);
+}
+
+
+// Test http://b/issue?id=1202711
+arr = [0];
+arr.length = 2;
+Array.prototype[1] = 1;
+assertEquals(1, arr.pop());
+assertEquals(0, arr.pop());
+Array.prototype[1] = undefined;
+
+// Test http://code.google.com/p/chromium/issues/detail?id=21860
+Array.prototype.push.apply([], [1].splice(0, -(-1 % 5)));
diff --git a/deps/v8/test/mjsunit/array-shift.js b/deps/v8/test/mjsunit/array-shift.js
new file mode 100644
index 000000000..d985b31e0
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-shift.js
@@ -0,0 +1,71 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check that shifting array of holes keeps it as array of holes
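+// (A hole is an index for which the array has no own property; shift must
+// not turn holes into own properties holding undefined.)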
+(function() {
+ var array = new Array(10);
+ array.shift();
+ assertFalse(0 in array);
+})();
+
+// Now check the case with array of holes and some elements on prototype.
+(function() {
+ var len = 9;
+ var array = new Array(len);
+ Array.prototype[3] = "@3";
+ Array.prototype[7] = "@7";
+
+ assertEquals(len, array.length);
+ for (var i = 0; i < array.length; i++) {
+ assertEquals(array[i], Array.prototype[i]);
+ }
+
+ array.shift();
+
+ assertEquals(len - 1, array.length);
+ // Note that shift copies values from prototype into the array.
+ assertEquals(array[2], Array.prototype[3]);
+ assertTrue(array.hasOwnProperty(2));
+
+ assertEquals(array[6], Array.prototype[7]);
+ assertTrue(array.hasOwnProperty(6));
+
+ // ... but keeps the rest as holes:
+ Array.prototype[5] = "@5";
+ assertEquals(array[5], Array.prototype[5]);
+ assertFalse(array.hasOwnProperty(5));
+
+ assertEquals(array[3], Array.prototype[3]);
+ assertFalse(array.hasOwnProperty(3));
+
+ assertEquals(array[7], Array.prototype[7]);
+ assertFalse(array.hasOwnProperty(7));
+
+ assertTrue(delete Array.prototype[3]);
+ assertTrue(delete Array.prototype[5]);
+ assertTrue(delete Array.prototype[7]);
+})();
diff --git a/deps/v8/test/mjsunit/array-slice.js b/deps/v8/test/mjsunit/array-slice.js
new file mode 100644
index 000000000..c993a077f
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-slice.js
@@ -0,0 +1,162 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check that slicing array of holes keeps it as array of holes
+(function() {
+ var array = new Array(10);
+ for (var i = 0; i < 7; i++) {
+ var sliced = array.slice();
+ assertEquals(array.length, sliced.length);
+ assertFalse(0 in sliced);
+ }
+})();
+
+
+// Check various forms of arguments omission.
+(function() {
+ var array = new Array(7);
+
+ for (var i = 0; i < 7; i++) {
+ assertEquals(array, array.slice());
+ assertEquals(array, array.slice(0));
+ assertEquals(array, array.slice(undefined));
+ assertEquals(array, array.slice("foobar"));
+ assertEquals(array, array.slice(undefined, undefined));
+ }
+})();
+
+
+// Check variants of negatives and positive indices.
+(function() {
+ var array = new Array(7);
+
+ for (var i = 0; i < 7; i++) {
+ assertEquals(7, array.slice(-100).length);
+ assertEquals(3, array.slice(-3).length);
+ assertEquals(3, array.slice(4).length);
+ assertEquals(1, array.slice(6).length);
+ assertEquals(0, array.slice(7).length);
+ assertEquals(0, array.slice(8).length);
+ assertEquals(0, array.slice(100).length);
+
+ assertEquals(0, array.slice(0, -100).length);
+ assertEquals(4, array.slice(0, -3).length);
+ assertEquals(4, array.slice(0, 4).length);
+ assertEquals(6, array.slice(0, 6).length);
+ assertEquals(7, array.slice(0, 7).length);
+ assertEquals(7, array.slice(0, 8).length);
+ assertEquals(7, array.slice(0, 100).length);
+
+ // Some exotic cases.
+
+ obj = { toString: function() { throw 'Exception'; } };
+
+ // More than 2 arguments:
+ assertEquals(7, array.slice(0, 7, obj, null, undefined).length);
+
+ // Custom conversion:
+ assertEquals(1, array.slice({valueOf: function() { return 1; }},
+ {toString: function() { return 2; }}).length);
+
+ // Throwing an exception in conversion:
+ try {
+ assertEquals(7, array.slice(0, obj).length);
+ throw 'Should have thrown';
+ } catch (e) {
+ assertEquals('Exception', e);
+ }
+ }
+})();
+
+
+// Nasty: modify the array in ToInteger.
+(function() {
+ var array = [];
+ var expected = []
+ bad_guy = { valueOf: function() { array.push(array.length); return -1; } };
+
+ for (var i = 0; i < 13; i++) {
+ var sliced = array.slice(bad_guy);
+ expected.push(i);
+ assertEquals(expected, array);
+ // According to the spec (15.4.4.10), length is calculated before
+ // performing ToInteger on arguments.
+ if (i == 0) {
+ assertEquals([], sliced); // Length was 0, nothing to get.
+ } else {
+      // By now the array is [0 .. i], but length was read as i before
+      // ToInteger ran, so we get [i - 1].
+ assertEquals([i - 1], sliced);
+ }
+ }
+})();
+
+
+// Now check the case with array of holes and some elements on prototype.
+(function() {
+ var len = 9;
+ var array = new Array(len);
+
+ var at3 = "@3";
+ var at7 = "@7";
+
+ for (var i = 0; i < 7; i++) {
+ Array.prototype[3] = at3;
+ Array.prototype[7] = at7;
+
+ assertEquals(len, array.length);
+ for (var i = 0; i < array.length; i++) {
+ assertEquals(array[i], Array.prototype[i]);
+ }
+
+ var sliced = array.slice();
+
+ assertEquals(len, sliced.length);
+
+ assertTrue(delete Array.prototype[3]);
+ assertTrue(delete Array.prototype[7]);
+
+ // Note that slice copies values from prototype into the array.
+ assertEquals(array[3], undefined);
+ assertFalse(array.hasOwnProperty(3));
+ assertEquals(sliced[3], at3);
+ assertTrue(sliced.hasOwnProperty(3));
+
+ assertEquals(array[7], undefined);
+ assertFalse(array.hasOwnProperty(7));
+ assertEquals(sliced[7], at7);
+ assertTrue(sliced.hasOwnProperty(7));
+
+ // ... but keeps the rest as holes:
+ Array.prototype[5] = "@5";
+ assertEquals(array[5], Array.prototype[5]);
+ assertFalse(array.hasOwnProperty(5));
+ assertEquals(sliced[5], Array.prototype[5]);
+ assertFalse(sliced.hasOwnProperty(5));
+
+ assertTrue(delete Array.prototype[5]);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/array-splice.js b/deps/v8/test/mjsunit/array-splice.js
index 0543c323b..18f81fe84 100644
--- a/deps/v8/test/mjsunit/array-splice.js
+++ b/deps/v8/test/mjsunit/array-splice.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,290 +25,265 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-/**
- * @fileoverview Test splice, shift, unshift, slice and join on small
- * and large arrays. Some of these methods are specified such that they
- * should work on other objects too, so we test that too.
- */
-
-var LARGE = 40000000;
-var VERYLARGE = 4000000000;
-
-// Nicer for firefox 1.5. Unless you uncomment the following two lines,
-// smjs will appear to hang on this file.
-//var LARGE = 40000;
-//var VERYLARGE = 40000;
-
-var fourhundredth = LARGE/400;
-
-function PseudoArray() {
-};
-
-for (var use_real_arrays = 0; use_real_arrays <= 1; use_real_arrays++) {
- var poses = [0, 140, 20000, VERYLARGE];
- var the_prototype;
- var new_function;
- var push_function;
- var concat_function;
- var slice_function;
- var splice_function;
- var splice_function_2;
- var unshift_function;
- var unshift_function_2;
- var shift_function;
- if (use_real_arrays) {
- new_function = function(length) {
- return new Array(length);
- };
- the_prototype = Array.prototype;
- push_function = function(array, elt) {
- return array.push(elt);
- };
- concat_function = function(array, other) {
- return array.concat(other);
- };
- slice_function = function(array, start, len) {
- return array.slice(start, len);
- };
- splice_function = function(array, start, len) {
- return array.splice(start, len);
- };
- splice_function_2 = function(array, start, len, elt) {
- return array.splice(start, len, elt);
- };
- unshift_function = function(array, elt) {
- return array.unshift(elt);
- };
- unshift_function_2 = function(array, elt1, elt2) {
- return array.unshift(elt1, elt2);
- };
- shift_function = function(array) {
- return array.shift();
- };
- } else {
- // Don't run largest size on non-arrays or we'll be here for ever.
- poses.pop();
- new_function = function(length) {
- var obj = new PseudoArray();
- obj.length = length;
- return obj;
- };
- the_prototype = PseudoArray.prototype;
- push_function = function(array, elt) {
- array[array.length] = elt;
- array.length++;
- };
- concat_function = function(array, other) {
- return Array.prototype.concat.call(array, other);
- };
- slice_function = function(array, start, len) {
- return Array.prototype.slice.call(array, start, len);
- };
- splice_function = function(array, start, len) {
- return Array.prototype.splice.call(array, start, len);
- };
- splice_function_2 = function(array, start, len, elt) {
- return Array.prototype.splice.call(array, start, len, elt);
- };
- unshift_function = function(array, elt) {
- return Array.prototype.unshift.call(array, elt);
- };
- unshift_function_2 = function(array, elt1, elt2) {
- return Array.prototype.unshift.call(array, elt1, elt2);
- };
- shift_function = function(array) {
- return Array.prototype.shift.call(array);
- };
+// Check that splicing array of holes keeps it as array of holes
+(function() {
+ for (var i = 0; i < 7; i++) {
+ var array = new Array(10);
+ var spliced = array.splice(1, 1, 'one', 'two');
+ assertEquals(1, spliced.length);
+ assertFalse(0 in spliced);
+
+ assertEquals(11, array.length);
+ assertFalse(0 in array);
+ assertTrue(1 in array);
+ assertTrue(2 in array);
+ assertFalse(3 in array);
+ }
+})();
+
+
+// Check various forms of arguments omission.
+(function() {
+ var array;
+ for (var i = 0; i < 7; i++) {
+ // SpiderMonkey and JSC return undefined in the case where no
+ // arguments are given instead of using the implicit undefined
+ // arguments. This does not follow ECMA-262, but we do the same for
+ // compatibility.
+ // TraceMonkey follows ECMA-262 though.
+ array = [1, 2, 3]
+ assertEquals(undefined, array.splice());
+ assertEquals([1, 2, 3], array);
+
+ // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
+ // given differently from when an undefined delete count is given.
+ // This does not follow ECMA-262, but we do the same for
+ // compatibility.
+ array = [1, 2, 3]
+ assertEquals([1, 2, 3], array.splice(0));
+ assertEquals([], array);
+
+ array = [1, 2, 3]
+ assertEquals([1, 2, 3], array.splice(undefined));
+ assertEquals([], array);
+
+ array = [1, 2, 3]
+ assertEquals([1, 2, 3], array.splice("foobar"));
+ assertEquals([], array);
+
+ array = [1, 2, 3]
+ assertEquals([], array.splice(undefined, undefined));
+ assertEquals([1, 2, 3], array);
+
+ array = [1, 2, 3]
+ assertEquals([], array.splice("foobar", undefined));
+ assertEquals([1, 2, 3], array);
+
+ array = [1, 2, 3]
+ assertEquals([], array.splice(undefined, "foobar"));
+ assertEquals([1, 2, 3], array);
+
+ array = [1, 2, 3]
+ assertEquals([], array.splice("foobar", "foobar"));
+ assertEquals([1, 2, 3], array);
}
+})();
+
+
+// Check variants of negatives and positive indices.
+(function() {
+ var array, spliced;
+ for (var i = 0; i < 7; i++) {
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(-100);
+ assertEquals([], array);
+ assertEquals([1, 2, 3, 4, 5, 6, 7], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(-3);
+ assertEquals([1, 2, 3, 4], array);
+ assertEquals([5, 6, 7], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(4);
+ assertEquals([1, 2, 3, 4], array);
+ assertEquals([5, 6, 7], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(6);
+ assertEquals([1, 2, 3, 4, 5, 6], array);
+ assertEquals([7], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(7);
+ assertEquals([1, 2, 3, 4, 5, 6, 7], array);
+ assertEquals([], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(8);
+ assertEquals([1, 2, 3, 4, 5, 6, 7], array);
+ assertEquals([], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(100);
+ assertEquals([1, 2, 3, 4, 5, 6, 7], array);
+ assertEquals([], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(0, -100);
+ assertEquals([1, 2, 3, 4, 5, 6, 7], array);
+ assertEquals([], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(0, -3);
+ assertEquals([1, 2, 3, 4, 5, 6, 7], array);
+ assertEquals([], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(0, 4);
+ assertEquals([5, 6, 7], array);
+ assertEquals([1, 2, 3, 4], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(0, 6);
+ assertEquals([7], array);
+ assertEquals([1, 2, 3, 4, 5, 6], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(0, 7);
+ assertEquals([], array);
+ assertEquals([1, 2, 3, 4, 5, 6, 7], spliced);
- for (var pos_pos = 0; pos_pos < poses.length; pos_pos++) {
- var pos = poses[pos_pos];
- if (pos > 100) {
- var a = new_function(pos);
- assertEquals(pos, a.length);
- push_function(a, 'foo');
- assertEquals(pos + 1, a.length);
- var b = ['bar'];
- // Delete a huge number of holes.
- var c = splice_function(a, 10, pos - 20);
- assertEquals(pos - 20, c.length);
- assertEquals(21, a.length);
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(0, 8);
+ assertEquals([], array);
+ assertEquals([1, 2, 3, 4, 5, 6, 7], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(0, 100);
+ assertEquals([], array);
+ assertEquals([1, 2, 3, 4, 5, 6, 7], spliced);
+
+ // Some exotic cases.
+ obj = { toString: function() { throw 'Exception'; } };
+
+ // Throwing an exception in conversion:
+ try {
+ [1, 2, 3].splice(obj, 3);
+ throw 'Should have thrown';
+ } catch (e) {
+ assertEquals('Exception', e);
}
- // Add a numeric property to the prototype of the array class. This
- // allows us to test some borderline stuff relative to the standard.
- the_prototype["" + (pos + 1)] = 'baz';
-
- if (use_real_arrays) {
- // It seems quite clear from ECMAScript spec 15.4.4.5. Just call Get on
- // every integer in the range.
- // IE, Safari get this right.
- // FF, Opera get this wrong.
- var a = ['zero', ,'two'];
- if (pos == 0) {
- assertEquals("zero,baz,two", a.join(","));
- }
-
- // Concat only applies to real arrays, unlike most of the other methods.
- var a = new_function(pos);
- push_function(a, "con");
- assertEquals("con", a[pos]);
- assertEquals(pos + 1, a.length);
- var b = new_function(0);
- push_function(b, "cat");
- assertEquals("cat", b[0]);
- var ab = concat_function(a, b);
- assertEquals("con", ab[pos]);
- assertEquals(pos + 2, ab.length);
- assertEquals("cat", ab[pos + 1]);
- var ba = concat_function(b, a);
- assertEquals("con", ba[pos + 1]);
- assertEquals(pos + 2, ba.length);
- assertEquals("cat", ba[0]);
-
- // Join with '' as separator.
- var join = a.join('');
- assertEquals("con", join);
- join = b.join('');
- assertEquals("cat", join);
- join = ab.join('');
- assertEquals("concat", join);
- join = ba.join('');
- assertEquals("catcon", join);
-
- var sparse = [];
- sparse[pos + 1000] = 'is ';
- sparse[pos + 271828] = 'time ';
- sparse[pos + 31415] = 'the ';
- sparse[pos + 012260199] = 'all ';
- sparse[-1] = 'foo';
- sparse[pos + 22591927] = 'good ';
- sparse[pos + 1618033] = 'for ';
- sparse[pos + 91] = ': Now ';
- sparse[pos + 86720199] = 'men.';
- sparse.hest = 'fisk';
-
- assertEquals("baz: Now is the time for all good men.", sparse.join(''));
+ try {
+ [1, 2, 3].splice(0, obj, 3);
+ throw 'Should have thrown';
+ } catch (e) {
+ assertEquals('Exception', e);
+ }
+
+ array = [1, 2, 3];
+ array.splice(0, 3, obj);
+ assertEquals(1, array.length);
+
+ // Custom conversion:
+ array = [1, 2, 3];
+ spliced = array.splice({valueOf: function() { return 1; }},
+ {toString: function() { return 2; }},
+ 'one', 'two');
+ assertEquals([2, 3], spliced);
+ assertEquals([1, 'one', 'two'], array);
+ }
+})();
+
+
+// Nasty: modify the array in ToInteger.
+(function() {
+ var array = [];
+ var spliced;
+
+ for (var i = 0; i < 13; i++) {
+ bad_start = { valueOf: function() { array.push(2*i); return -1; } };
+ bad_count = { valueOf: function() { array.push(2*i + 1); return 1; } };
+ spliced = array.splice(bad_start, bad_count);
+ // According to the spec (15.4.4.12), length is calculated before
+ // performing ToInteger on arguments. However, v8 ignores elements
+ // we add while converting, so we need corrective pushes.
+ array.push(2*i); array.push(2*i + 1);
+ if (i == 0) {
+ assertEquals([], spliced); // Length was 0, nothing to get.
+ assertEquals([0, 1], array);
+ } else {
+      // When we start the splice, the array is [0 .. 2*i - 1], so the
+      // result is [2*i - 1]; that element is removed from the array, and
+      // [2*i, 2*i + 1] are appended by the corrective pushes above.
+ assertEquals([2 * i - 1], spliced);
+ assertEquals(2 * i, array[i]);
+ assertEquals(2 * i + 1, array[i + 1]);
}
+ }
+})();
+
- a = new_function(pos);
- push_function(a, 'zero');
- push_function(a, void 0);
- push_function(a, 'two');
-
- // Splice works differently from join.
- // IE, Safari get this wrong.
- // FF, Opera get this right.
- // 15.4.4.12 line 24 says the object itself has to have the property...
- var zero = splice_function(a, pos, 1);
- assertEquals("undefined", typeof(a[pos]));
- assertEquals("two", a[pos+1], "pos1:" + pos);
- assertEquals(pos + 2, a.length, "a length");
- assertEquals(1, zero.length, "zero length");
- assertEquals("zero", zero[0]);
-
- // 15.4.4.12 line 41 says the object itself has to have the property...
- a = new_function(pos);
- push_function(a, 'zero');
- push_function(a, void 0);
- push_function(a, 'two');
- var nothing = splice_function_2(a, pos, 0, 'minus1');
- assertEquals("minus1", a[pos]);
- assertEquals("zero", a[pos+1]);
- assertEquals("undefined", typeof(a[pos+2]), "toot!");
- assertEquals("two", a[pos+3], "pos3");
- assertEquals(pos + 4, a.length);
- assertEquals(1, zero.length);
- assertEquals("zero", zero[0]);
-
- // 15.4.4.12 line 10 says the object itself has to have the property...
- a = new_function(pos);
- push_function(a, 'zero');
- push_function(a, void 0);
- push_function(a, 'two');
- var one = splice_function(a, pos + 1, 1);
- assertEquals("", one.join(","));
- assertEquals(pos + 2, a.length);
- assertEquals("zero", a[pos]);
- assertEquals("two", a[pos+1]);
-
- // Set things back to the way they were.
- the_prototype[pos + 1] = undefined;
-
- // Unshift.
- var a = new_function(pos);
- push_function(a, "foo");
- assertEquals("foo", a[pos]);
- assertEquals(pos + 1, a.length);
- unshift_function(a, "bar");
- assertEquals("foo", a[pos+1]);
- assertEquals(pos + 2, a.length);
- assertEquals("bar", a[0]);
- unshift_function_2(a, "baz", "boo");
- assertEquals("foo", a[pos+3]);
- assertEquals(pos + 4, a.length);
- assertEquals("baz", a[0]);
- assertEquals("boo", a[1]);
- assertEquals("bar", a[2]);
-
- // Shift.
- var baz = shift_function(a);
- assertEquals("baz", baz);
- assertEquals("boo", a[0]);
- assertEquals(pos + 3, a.length);
- assertEquals("foo", a[pos + 2]);
-
- // Slice.
- var bar = slice_function(a, 1, 0); // don't throw an exception please.
- bar = slice_function(a, 1, 2);
- assertEquals("bar", bar[0]);
- assertEquals(1, bar.length);
- assertEquals("bar", a[1]);
+// Now check the case with array of holes and some elements on prototype.
+(function() {
+ var len = 9;
+
+ var at3 = "@3";
+ var at7 = "@7";
+
+ for (var i = 0; i < 7; i++) {
+ var array = new Array(len);
+ Array.prototype[3] = at3;
+ Array.prototype[7] = at7;
+
+ var spliced = array.splice(2, 2, 'one', undefined, 'two');
+
+    // The second hole of the array (at index 3) turns into the value of
+    // Array.prototype[3] while copying.
+ assertEquals([, at3], spliced);
+ assertEquals([, , 'one', undefined, 'two', , , at7, at7, ,], array);
+
+ // ... but array[7] is actually a hole:
+ assertTrue(delete Array.prototype[7]);
+ assertEquals(undefined, array[7]);
+
+ // and now check hasOwnProperty
+ assertFalse(array.hasOwnProperty(0));
+ assertFalse(array.hasOwnProperty(1));
+ assertTrue(array.hasOwnProperty(2));
+ assertTrue(array.hasOwnProperty(3));
+ assertTrue(array.hasOwnProperty(4));
+ assertFalse(array.hasOwnProperty(5));
+ assertFalse(array.hasOwnProperty(6));
+ assertFalse(array.hasOwnProperty(7));
+ assertTrue(array.hasOwnProperty(8));
+ assertFalse(array.hasOwnProperty(9));
+
+    // and now check a couple of indices above the length.
+ assertFalse(array.hasOwnProperty(10));
+ assertFalse(array.hasOwnProperty(15));
+ assertFalse(array.hasOwnProperty(31));
+ assertFalse(array.hasOwnProperty(63));
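+    // Precedence note: 2 << 32 - 1 parses as 2 << (32 - 1), and 2 << 31
+    // overflows 32-bit arithmetic to 0, so this effectively re-checks
+    // index 0.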
+ assertFalse(array.hasOwnProperty(2 << 32 - 1));
+ }
+})();
+
+
+// Check the behaviour when approaching maximal values for length.
+(function() {
+ for (var i = 0; i < 7; i++) {
+ try {
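+      // Note: 1 << 32 is 1 (shift counts are taken mod 32), so the length
+      // below is -2 and it is the Array constructor itself that throws.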
+ new Array((1 << 32) - 3).splice(-1, 0, 1, 2, 3, 4, 5);
+ throw 'Should have thrown RangeError';
+ } catch (e) {
+ assertTrue(e instanceof RangeError);
+ }
+ // Check smi boundary
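+    // (On 32-bit platforms V8 stores small integers as 31-bit smis, so a
+    // length just below 2^30 sits at the smi/heap-number boundary.)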
+ var bigNum = (1 << 30) - 3;
+ var array = new Array(bigNum);
+ array.splice(-1, 0, 1, 2, 3, 4, 5, 6, 7);
+ assertEquals(bigNum + 7, array.length);
}
-}
-
-// Lets see if performance is reasonable.
-
-var a = new Array(LARGE + 10);
-for (var i = 0; i < a.length; i += 1000) {
- a[i] = i;
-}
-
-// Take something near the end of the array.
-for (var i = 0; i < 100; i++) {
- var top = a.splice(LARGE, 5);
- assertEquals(5, top.length);
- assertEquals(LARGE, top[0]);
- assertEquals("undefined", typeof(top[1]));
- assertEquals(LARGE + 5, a.length);
- a.splice(LARGE, 0, LARGE);
- a.length = LARGE + 10;
-}
-
-var a = new Array(LARGE + 10);
-for (var i = 0; i < a.length; i += fourhundredth) {
- a[i] = i;
-}
-
-// Take something near the middle of the array.
-for (var i = 0; i < 10; i++) {
- var top = a.splice(LARGE >> 1, 5);
- assertEquals(5, top.length);
- assertEquals(LARGE >> 1, top[0]);
- assertEquals("undefined", typeof(top[1]));
- assertEquals(LARGE + 5, a.length);
- a.splice(LARGE >> 1, 0, LARGE >> 1, void 0, void 0, void 0, void 0);
-}
-
-
-// Test http://b/issue?id=1202711
-arr = [0];
-arr.length = 2;
-Array.prototype[1] = 1;
-assertEquals(1, arr.pop());
-assertEquals(0, arr.pop());
-Array.prototype[1] = undefined;
-
-// Test http://code.google.com/p/chromium/issues/detail?id=21860
-Array.prototype.push.apply([], [1].splice(0, -(-1 % 5)));
+})();
diff --git a/deps/v8/test/mjsunit/array-unshift.js b/deps/v8/test/mjsunit/array-unshift.js
new file mode 100644
index 000000000..06a78a7d9
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-unshift.js
@@ -0,0 +1,132 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check that unshifting array of holes keeps the original array
+// as array of holes
+(function() {
+ var array = new Array(10);
+  assertEquals(13, array.unshift('1st', '2nd', '3rd'));
+ assertTrue(0 in array);
+ assertTrue(1 in array);
+ assertTrue(2 in array);
+ assertFalse(3 in array);
+})();
+
+
+// Check that unshift with no args has a side effect of
+// filling the holes with elements from the prototype
+// (if present, of course).
+(function() {
+ var len = 3;
+ var array = new Array(len);
+
+ var at0 = '@0';
+ var at2 = '@2';
+
+ Array.prototype[0] = at0;
+ Array.prototype[2] = at2;
+
+ // array owns nothing...
+ assertFalse(array.hasOwnProperty(0));
+ assertFalse(array.hasOwnProperty(1));
+ assertFalse(array.hasOwnProperty(2));
+
+ // ... but sees values from Array.prototype
+ assertEquals(array[0], at0);
+ assertEquals(array[1], undefined);
+ assertEquals(array[2], at2);
+
+ assertEquals(len, array.unshift());
+
+ assertTrue(delete Array.prototype[0]);
+ assertTrue(delete Array.prototype[2]);
+
+ // unshift makes array own 0 and 2...
+ assertTrue(array.hasOwnProperty(0));
+ assertFalse(array.hasOwnProperty(1));
+ assertTrue(array.hasOwnProperty(2));
+
+  // ... so they are not affected by delete.
+ assertEquals(array[0], at0);
+ assertEquals(array[1], undefined);
+ assertEquals(array[2], at2);
+})();
+
+
+// Now check the case with array of holes and some elements on prototype.
+(function() {
+ var len = 9;
+ var array = new Array(len);
+ Array.prototype[3] = "@3";
+ Array.prototype[7] = "@7";
+
+ assertEquals(len, array.length);
+ for (var i = 0; i < array.length; i++) {
+ assertEquals(array[i], Array.prototype[i]);
+ }
+
+ assertEquals(len + 1, array.unshift('head'));
+
+ assertEquals(len + 1, array.length);
+ // Note that unshift copies values from prototype into the array.
+ assertEquals(array[4], Array.prototype[3]);
+ assertTrue(array.hasOwnProperty(4));
+
+ assertEquals(array[8], Array.prototype[7]);
+ assertTrue(array.hasOwnProperty(8));
+
+ // ... but keeps the rest as holes:
+ Array.prototype[5] = "@5";
+ assertEquals(array[5], Array.prototype[5]);
+ assertFalse(array.hasOwnProperty(5));
+
+ assertEquals(array[3], Array.prototype[3]);
+ assertFalse(array.hasOwnProperty(3));
+
+ assertEquals(array[7], Array.prototype[7]);
+ assertFalse(array.hasOwnProperty(7));
+
+ assertTrue(delete Array.prototype[3]);
+ assertTrue(delete Array.prototype[5]);
+ assertTrue(delete Array.prototype[7]);
+})();
+
+// Check the behaviour when approaching maximal values for length.
+(function() {
+ for (var i = 0; i < 7; i++) {
+ try {
+ new Array((1 << 32) - 3).unshift(1, 2, 3, 4, 5);
+ throw 'Should have thrown RangeError';
+ } catch (e) {
+ assertTrue(e instanceof RangeError);
+ }
+
+ // Check smi boundary
+ var bigNum = (1 << 30) - 3;
+ assertEquals(bigNum + 7, new Array(bigNum).unshift(1, 2, 3, 4, 5, 6, 7));
+ }
+})();
diff --git a/deps/v8/test/mjsunit/bugs/618.js b/deps/v8/test/mjsunit/bugs/618.js
new file mode 100644
index 000000000..afa9929a6
--- /dev/null
+++ b/deps/v8/test/mjsunit/bugs/618.js
@@ -0,0 +1,86 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
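+// Bug 618: an inline constructor must honor accessors that are installed
+// on the prototype chain after the first instance has been created. Each
+// block below installs a setter for 'x' in a different way and checks that
+// constructing a new instance runs it.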
+// Simple class using inline constructor.
+function C1() {
+ this.x = 23;
+};
+var c1 = new C1();
+assertEquals(23, c1.x);
+assertEquals("undefined", typeof c1.y);
+
+// Add setter somewhere on the prototype chain after having constructed the
+// first instance.
+C1.prototype = { set x(value) { this.y = 23; } };
+var c1 = new C1();
+assertEquals("undefined", typeof c1.x);
+assertEquals(23, c1.y);
+
+// Simple class using inline constructor.
+function C2() {
+ this.x = 23;
+};
+var c2 = new C2();
+assertEquals(23, c2.x);
+assertEquals("undefined", typeof c2.y);
+
+// Add setter somewhere on the prototype chain after having constructed the
+// first instance.
+C2.prototype.__proto__ = { set x(value) { this.y = 23; } };
+var c2 = new C2();
+assertEquals("undefined", typeof c2.x);
+assertEquals(23, c2.y);
+
+// Simple class using inline constructor.
+function C3() {
+ this.x = 23;
+};
+var c3 = new C3();
+assertEquals(23, c3.x);
+assertEquals("undefined", typeof c3.y);
+
+// Add setter somewhere on the prototype chain after having constructed the
+// first instance.
+C3.prototype.__defineSetter__('x', function(value) { this.y = 23; });
+var c3 = new C3();
+assertEquals("undefined", typeof c3.x);
+assertEquals(23, c3.y);
+
+// Simple class using inline constructor.
+function C4() {
+ this.x = 23;
+};
+var c4 = new C4();
+assertEquals(23, c4.x);
+assertEquals("undefined", typeof c4.y);
+
+// Add setter somewhere on the prototype chain after having constructed the
+// first instance.
+C4.prototype.__proto__.__defineSetter__('x', function(value) { this.y = 23; });
+var c4 = new C4();
+assertEquals("undefined", typeof c4.x);
+assertEquals(23, c4.y);
diff --git a/deps/v8/test/mjsunit/codegen-coverage.js b/deps/v8/test/mjsunit/codegen-coverage.js
index d5e7769d7..42c371ba2 100644
--- a/deps/v8/test/mjsunit/codegen-coverage.js
+++ b/deps/v8/test/mjsunit/codegen-coverage.js
@@ -25,64 +25,110 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Test the paths in the code generator where values in specific
-// registers get moved around so that the shift operation can use
-// register ECX on ia32 for the shift amount. Other codegen coverage
-// tests should go here too.
-
-
+// Flags: --nofull-compiler --nofast-compiler
+// Test paths in the code generator where values in specific registers
+// get moved around.
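+// (The flags above disable the full and fast compilers so that the classic
+// code generator, whose register allocation this test depends on, is used.)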
function identity(x) {
return x;
}
function cover_codegen_paths() {
var x = 1;
- var a; // Register EAX
- var b; // Register EBX
- var c; // Register ECX
- var d; // Register EDX
- // Register ESI is already used.
- var di; // Register EDI
+
+ // This test depends on the fixed order of register allocation. We try to
+ // get values in specific registers (ia32, x64):
+ var a; // Register eax, rax.
+ var b; // Register ebx, rbx.
+ var c; // Register ecx, rcx.
+ var d; // Register edx, rdx.
+ var di; // Register edi, rdi.
while (x == 1) {
+ // The call will spill registers and leave x in {eax,rax}.
x = identity(1);
+ // The add will spill x and reuse {eax,rax} for the result.
a = x + 1;
+ // A fresh register {ebx,rbx} will be allocated for x, then reused for
+ // the result.
+ b = x + 1;
+ // Et cetera.
c = x + 1;
d = x + 1;
- b = x + 1;
di = x + 1;
// Locals are in the corresponding registers here.
- assertEquals(c << a, 8);
+ assertEquals(8, c << a);
x = identity(1);
a = x + 1;
+ b = x + 1;
c = x + 1;
d = x + 1;
- b = x + 1;
di = x + 1;
- // Locals are in the corresponding registers here.
- assertEquals(a << c, 8);
+ assertEquals(8, a << c);
x = identity(1);
a = x + 1;
+ b = x + 1;
c = x + 1;
d = x + 1;
- b = x + 1;
di = x + 1;
- // Locals are in the corresponding registers here.
c = 0; // Free register ecx.
- assertEquals(a << d, 8);
+ assertEquals(8, a << d);
x = identity(1);
a = x + 1;
+ b = x + 1;
c = x + 1;
d = x + 1;
- b = x + 1;
di = x + 1;
- // Locals are in the corresponding registers here.
b = 0; // Free register ebx.
- assertEquals(a << d, 8);
+ assertEquals(8, a << d);
+
+ // Test the non-commutative subtraction operation with a smi on the
+ // left, all available registers on the right, and a non-smi result.
+ x = identity(-1073741824); // Least (31-bit) smi.
+ a = x + 1; // Still a smi, the greatest smi negated.
+ b = x + 1;
+ c = x + 1;
+ d = x + 1;
+ di = x + 1;
+ // Subtraction should overflow the 31-bit smi range. The result
+ // (1073741824) is outside the 31-bit smi range so it doesn't hit the
+ // "unsafe smi" code that spills a register.
+ assertEquals(1073741824, 1 - a);
+
+ x = identity(-1073741824);
+ a = x + 1;
+ b = x + 1;
+ c = x + 1;
+ d = x + 1;
+ di = x + 1;
+ assertEquals(1073741824, 1 - b);
+
+ x = identity(-1073741824);
+ a = x + 1;
+ b = x + 1;
+ c = x + 1;
+ d = x + 1;
+ di = x + 1;
+ assertEquals(1073741824, 1 - c);
+
+ x = identity(-1073741824);
+ a = x + 1;
+ b = x + 1;
+ c = x + 1;
+ d = x + 1;
+ di = x + 1;
+ assertEquals(1073741824, 1 - d);
+
+ x = identity(-1073741824);
+ a = x + 1;
+ b = x + 1;
+ c = x + 1;
+ d = x + 1;
+ di = x + 1;
+ assertEquals(1073741824, 1 - di);
x = 3;
}
diff --git a/deps/v8/test/mjsunit/compiler/assignment.js b/deps/v8/test/mjsunit/compiler/assignment.js
new file mode 100644
index 000000000..ee2d32378
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/assignment.js
@@ -0,0 +1,264 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests for compound assignments at the top level
+
+z = 2;
+z += 4;
+
+assertEquals(6, z);
+
+a = new Array(10);
+
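+// a[2] starts out as a hole, so this first += reads undefined and stores
+// NaN; the plain assignment below overwrites it before anything is checked.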
+a[2] += 7;
+a[2] = 15;
+a[2] += 2;
+
+assertEquals(17, a[2]);
+
+b = new Object();
+b.foo = 5;
+b.foo += 12;
+
+assertEquals(17, b.foo);
+
+// Test compound assignments in an anonymous function with local variables.
+(function () {
+ var z = 2;
+ z += 4;
+
+  assertEquals(6, z);
+
+ var a = new Array(10);
+
+ a[2] += 7;
+ a[2] = 15;
+ a[2] += 2;
+
+ assertEquals(17, a[2]);
+
+ var b = new Object();
+ b.foo = 5;
+ b.foo += 12;
+
+ assertEquals(17, b.foo);
+})();
+
+// Test compound assignments in an anonymous function with global variables.
+(function () {
+ z = 2;
+ z += 4;
+
+  assertEquals(6, z);
+
+ a = new Array(10);
+
+ a[2] += 7;
+ a[2] = 15;
+ a[2] += 2;
+
+ assertEquals(17, a[2]);
+
+ b = new Object();
+ b.foo = 5;
+ b.foo += 12;
+
+ assertEquals(17, b.foo);
+})();
+
+// Test compound assignments in a named function with local variables.
+function foo() {
+ var z = 3;
+ z += 4;
+
+  assertEquals(7, z);
+
+ var a = new Array(10);
+
+ a[2] += 7;
+ a[2] = 15;
+ a[2] += 2;
+
+ assertEquals(17, a[2]);
+
+ var b = new Object();
+ b.foo = 5;
+ b.foo += 12;
+
+ assertEquals(17, b.foo);
+}
+
+foo();
+
+// Test compound assignments in a named function with global variables.
+function bar() {
+ z = 2;
+ z += 5;
+
+ assertEquals(z, 7);
+
+ a = new Array(10);
+
+ a[2] += 7;
+ a[2] = 15;
+ a[2] += 2;
+
+ assertEquals(17, a[2]);
+
+ b = new Object();
+ b.foo = 5;
+ b.foo += 12;
+
+ assertEquals(17, b.foo);
+}
+
+bar();
+
+// The entire series of tests is repeated, in loops.
+// -------------------------------------------
+// Tests for compound assignments in a loop at the top level
+
+for (i = 0; i < 5; ++i) {
+ z = 2;
+ z += 4;
+
+ assertEquals(z, 6);
+
+ a = new Array(10);
+
+ a[2] += 7;
+ a[2] = 15;
+ a[2] += 2;
+
+ assertEquals(17, a[2]);
+
+ b = new Object();
+ b.foo = 5;
+ b.foo += 12;
+
+ assertEquals(17, b.foo);
+}
+
+// Test compound assignments in a loop in an anonymous function with local variables.
+(function () {
+ for (var i = 0; i < 5; ++i) {
+ var z = 2;
+ z += 4;
+
+ assertEquals(z, 6);
+
+ var a = new Array(10);
+
+ a[2] += 7;
+ a[2] = 15;
+ a[2] += 2;
+
+ assertEquals(17, a[2]);
+
+ var b = new Object();
+ b.foo = 5;
+ b.foo += 12;
+
+ assertEquals(17, b.foo);
+ }
+})();
+
+// Test compound assignments in a loop in an anonymous function with global variables.
+(function () {
+ for (i = 0; i < 5; ++i) {
+ z = 2;
+ z += 4;
+
+ assertEquals(z, 6);
+
+ a = new Array(10);
+
+ a[2] += 7;
+ a[2] = 15;
+ a[2] += 2;
+
+ assertEquals(17, a[2]);
+
+ b = new Object();
+ b.foo = 5;
+ b.foo += 12;
+
+ assertEquals(17, b.foo);
+ }
+})();
+
+// Test compound assignments in a loop in a named function with local variables.
+function foo_loop() {
+ for (i = 0; i < 5; ++i) {
+ var z = 3;
+ z += 4;
+
+ assertEquals(z, 7);
+
+ var a = new Array(10);
+
+ a[2] += 7;
+ a[2] = 15;
+ a[2] += 2;
+
+ assertEquals(17, a[2]);
+
+ var b = new Object();
+ b.foo = 5;
+ b.foo += 12;
+
+ assertEquals(17, b.foo);
+ }
+}
+
+foo_loop();
+
+// Test compound assignments in a loop in a named function with global variables.
+function bar_loop() {
+ for (i = 0; i < 5; ++i) {
+ z = 2;
+ z += 5;
+
+ assertEquals(z, 7);
+
+ a = new Array(10);
+
+ a[2] += 7;
+ a[2] = 15;
+ a[2] += 2;
+
+ assertEquals(17, a[2]);
+
+ b = new Object();
+ b.foo = 5;
+ b.foo += 12;
+
+ assertEquals(17, b.foo);
+ }
+}
+
+bar_loop();
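A note on why each block above stores to a[2] three times: a compound assignment reads the current value, applies the operator, and writes the result back, so the first += on a fresh array hits a hole. A plain-semantics sketch:

// a[2] += 7 behaves like a[2] = a[2] + 7.
var arr = new Array(10);
arr[2] = arr[2] + 7;  // arr[2] was undefined, so this stores NaN
arr[2] = 15;          // plain store replaces the NaN
arr[2] = arr[2] + 2;  // 15 + 2 == 17, matching the asserts above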
diff --git a/deps/v8/test/mjsunit/compiler/simple-bailouts.js b/deps/v8/test/mjsunit/compiler/simple-bailouts.js
new file mode 100644
index 000000000..af80b7f05
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/simple-bailouts.js
@@ -0,0 +1,127 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --fast-compiler
+
+function Test() {
+ this.result = 0;
+ this.x = 0;
+ this.y = 0;
+ this.z = 0;
+}
+var a = 1;
+var b = 2;
+var c = 4;
+var d = 8;
+
+// Test operations expected to stay on the fast path. Enumerate all binary
+// trees with <= 4 leaves.
+Test.prototype.test0 = function () {
+ this.result = a | b;
+};
+
+Test.prototype.test1 = function() {
+ this.result = (a | b) | c;
+};
+
+Test.prototype.test2 = function() {
+ this.result = a | (b | c);
+};
+
+Test.prototype.test3 = function() {
+ this.result = ((a | b) | c) | d;
+};
+
+Test.prototype.test4 = function() {
+ this.result = (a | (b | c)) | d;
+};
+
+Test.prototype.test5 = function() {
+ this.result = (a | b) | (c | d);
+};
+
+Test.prototype.test6 = function() {
+ this.result = a | ((b | c) | d);
+};
+
+Test.prototype.test7 = function() {
+ this.result = a | (b | (c | d));
+};
+
+// These tests should fail if we bailed out to the beginning of the full
+// code.
+Test.prototype.test8 = function () {
+ // If this.x = 1 and a = 1.1:
+ this.y = this.x | b; // Should be (1 | 2) == 3.
+ this.x = c; // Should be 4.
+ this.z = this.x | a; // Should be (4 | 1.1) == 5.
+};
+
+Test.prototype.test9 = function() {
+ // If this.x = 2 and a = 1.1:
+ this.z = // (14 | 1.1) == 15
+ (this.x = // (6 | 8) == 14
+ (this.y = // (2 | 4) == 6
+ this.x // 2
+ | c) // 4
+ | d) // 8
+ | a; // 1.1
+};
+
+var t = new Test();
+
+t.test0();
+assertEquals(3, t.result);
+
+t.test1();
+assertEquals(7, t.result);
+t.test2();
+assertEquals(7, t.result);
+
+t.test3();
+assertEquals(15, t.result);
+t.test4();
+assertEquals(15, t.result);
+t.test5();
+assertEquals(15, t.result);
+t.test6();
+assertEquals(15, t.result);
+t.test7();
+assertEquals(15, t.result);
+
+a = 1.1;
+t.x = 1;
+t.test8();
+assertEquals(4, t.x);
+assertEquals(3, t.y);
+assertEquals(5, t.z);
+
+t.x = 2;
+t.test9();
+assertEquals(14, t.x);
+assertEquals(6, t.y);
+assertEquals(15, t.z);
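The expected values in test8 and test9 fall out of the ToInt32 conversion that bitwise operators apply to their operands, which discards the fractional part of 1.1. A worked example:

// | converts both operands with ToInt32 before operating.
var frac = 1.1;     // ToInt32(1.1) == 1
var r1 = 4 | frac;  // 4 | 1 == 5
var r2 = 14 | frac; // 14 | 1 == 15
// r1 === 5 and r2 === 15, matching the asserts on t.z above.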
diff --git a/deps/v8/test/mjsunit/compiler/simple-binary-op.js b/deps/v8/test/mjsunit/compiler/simple-binary-op.js
new file mode 100644
index 000000000..15e1a559c
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/simple-binary-op.js
@@ -0,0 +1,40 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --fast-compiler
+
+var a = 1;
+var b = 2;
+var c = 4;
+
+function f() { this.x = this.x | a | b | c | a | c; }
+
+var o = {x: 0, g: f};
+
+o.g();
+
+assertEquals(7, o.x);
diff --git a/deps/v8/test/mjsunit/compiler/simple-global-access.js b/deps/v8/test/mjsunit/compiler/simple-global-access.js
new file mode 100644
index 000000000..35746ba82
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/simple-global-access.js
@@ -0,0 +1,53 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --fast-compiler
+
+// Test global variable loads with the fast compiler.
+var g1 = 42;
+var g2 = 43;
+var g3 = 44;
+this.__defineGetter__("g4", function () { return 45; });
+
+function f1() { this.x = this.y = this.z = g1; }
+function f2() { this.x = g1; this.y = g2; this.z = g3; }
+function f3() { this.x = g4; }
+
+var o = { x: 0, y: 0, z: 0, test1: f1, test2: f2, test3: f3 };
+
+o.test1();
+assertEquals(42, o.x);
+assertEquals(42, o.y);
+assertEquals(42, o.z);
+
+o.test2();
+assertEquals(42, o.x);
+assertEquals(43, o.y);
+assertEquals(44, o.z);
+
+o.test3();
+assertEquals(45, o.x);
diff --git a/deps/v8/test/mjsunit/compiler/this-property-refs.js b/deps/v8/test/mjsunit/compiler/this-property-refs.js
new file mode 100644
index 000000000..5e8ea596c
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/this-property-refs.js
@@ -0,0 +1,64 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --fast-compiler
+
+// Test references to properties of this.
+function Test() {
+ this.a = 0;
+ this.b = 1;
+ this.c = 2;
+ this.d = 3;
+}
+
+Test.prototype.test0 = function () {
+ this.a = this.b;
+};
+
+Test.prototype.test1 = function() {
+ this.a = this.b = this.c;
+};
+
+Test.prototype.test2 = function() {
+ this.c = this.d;
+ this.b = this.c;
+ this.a = this.b;
+};
+
+var t = new Test();
+
+t.test0();
+assertEquals(1, t.a);
+
+t.test1();
+assertEquals(2, t.a);
+assertEquals(2, t.b);
+
+t.test2();
+assertEquals(3, t.a);
+assertEquals(3, t.b);
+assertEquals(3, t.c);
diff --git a/deps/v8/test/mjsunit/debug-compile-event.js b/deps/v8/test/mjsunit/debug-compile-event.js
index 071183bf6..e7ecf47ec 100644
--- a/deps/v8/test/mjsunit/debug-compile-event.js
+++ b/deps/v8/test/mjsunit/debug-compile-event.js
@@ -90,6 +90,11 @@ function listener(event, exec_state, event_data, data) {
var json = event_data.toJSONProtocol();
var msg = eval('(' + json + ')');
assertTrue('context' in msg.body.script);
+
+  // Check that we pick the script name from //@ sourceURL, iff present.
+ assertEquals(current_source.indexOf('sourceURL') >= 0 ?
+ 'myscript.js' : undefined,
+ event_data.script().name());
}
} catch (e) {
exception = e
@@ -109,6 +114,7 @@ compileSource('eval("eval(\'(function(){return a;})\')")');
source_count += 2; // Using eval causes additional compilation event.
compileSource('JSON.parse(\'{"a":1,"b":2}\')');
source_count++; // Using JSON.parse causes additional compilation event.
+compileSource('x=1; //@ sourceURL=myscript.js');
// Make sure that the debug event listener was invoked.
assertFalse(exception, "exception in listener")
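The //@ sourceURL convention exercised above lets dynamically compiled code name itself for debugging. A rough sketch of how such an annotation can be pulled out of a script's source; this is a hypothetical helper for illustration, not the extraction code V8 itself uses:

function extractSourceUrl(source) {
  var match = /\/\/@ sourceURL=(\S+)/.exec(source);
  return match ? match[1] : undefined;
}
// extractSourceUrl('x=1; //@ sourceURL=myscript.js') === 'myscript.js'
// extractSourceUrl('x=1;') === undefined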
diff --git a/deps/v8/test/mjsunit/div-mod.js b/deps/v8/test/mjsunit/div-mod.js
index b3c77e1da..1d352b556 100644
--- a/deps/v8/test/mjsunit/div-mod.js
+++ b/deps/v8/test/mjsunit/div-mod.js
@@ -154,4 +154,18 @@ function compute_mod(dividend, divisor) {
doTest(-a,-b);
}
}
-})()
+})();
+
+
+(function () {
+ // Edge cases
+ var zero = 0;
+ var minsmi32 = -0x40000000;
+ var minsmi64 = -0x80000000;
+ var somenum = 3532;
+ assertEquals(-0, zero / -1, "0 / -1");
+ assertEquals(1, minsmi32 / -0x40000000, "minsmi/minsmi-32");
+ assertEquals(1, minsmi64 / -0x80000000, "minsmi/minsmi-64");
+ assertEquals(somenum, somenum % -0x40000000, "%minsmi-32");
+ assertEquals(somenum, somenum % -0x80000000, "%minsmi-64");
+})();
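Two of these edge cases are easy to misread: 0 / -1 produces negative zero, and minsmi / minsmi must be exactly 1 even when the operands sit on the smi boundary. The sign of zero is invisible to ==, but it can be observed in plain JavaScript:

var negZero = 0 / -1;
// negZero == 0 is true, but the sign survives as a divisor:
var isNegZero = (negZero === 0) && (1 / negZero === -Infinity);
// isNegZero is true; for +0 the reciprocal is +Infinity instead.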
diff --git a/deps/v8/test/mjsunit/fuzz-natives.js b/deps/v8/test/mjsunit/fuzz-natives.js
index d906eb8a4..e2f601eb9 100644
--- a/deps/v8/test/mjsunit/fuzz-natives.js
+++ b/deps/v8/test/mjsunit/fuzz-natives.js
@@ -27,6 +27,9 @@
// Flags: --allow-natives-syntax
+var RUN_WITH_ALL_ARGUMENT_ENTRIES = false;
+var kOnManyArgumentsRemove = 5;
+
function makeArguments() {
var result = [ ];
result.push(17);
@@ -74,13 +77,23 @@ function testArgumentTypes(name, argc) {
var func = makeFunction(name, argc);
while (hasMore) {
var argPool = makeArguments();
+  // When we have 5 or more arguments, we lower the number of test cases
+  // by randomly removing kOnManyArgumentsRemove entries.
+ var numArguments = RUN_WITH_ALL_ARGUMENT_ENTRIES ?
+ kArgObjects : kArgObjects-kOnManyArgumentsRemove;
+ if (argc >= 5 && !RUN_WITH_ALL_ARGUMENT_ENTRIES) {
+ for (var i = 0; i < kOnManyArgumentsRemove; i++) {
+ var rand = Math.floor(Math.random() * (kArgObjects - i));
+ argPool.splice(rand,1);
+ }
+ }
var current = type;
var hasMore = false;
var argList = [ ];
for (var i = 0; i < argc; i++) {
- var index = current % kArgObjects;
- current = (current / kArgObjects) << 0;
- if (index != (kArgObjects - 1))
+ var index = current % numArguments;
+ current = (current / numArguments) << 0;
+ if (index != (numArguments - 1))
hasMore = true;
argList.push(argPool[index]);
}
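The % / integer-divide pair above enumerates argument combinations by treating the counter as a base-numArguments number, one digit per argument slot. The same decoding in isolation (decodeCombination is an illustrative name):

// Decode a counter into argc pool indices, base k (one digit per slot).
function decodeCombination(counter, argc, k) {
  var digits = [];
  for (var i = 0; i < argc; i++) {
    digits.push(counter % k);
    counter = (counter / k) << 0;  // truncating integer division
  }
  return digits;
}
// decodeCombination(5, 2, 3) -> [2, 1]: pool[2] for the first argument,
// pool[1] for the second.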
diff --git a/deps/v8/test/mjsunit/json.js b/deps/v8/test/mjsunit/json.js
index 56562e769..85457cd6e 100644
--- a/deps/v8/test/mjsunit/json.js
+++ b/deps/v8/test/mjsunit/json.js
@@ -200,8 +200,10 @@ TestInvalid('"Unterminated string');
TestInvalid('"Unterminated string\\"');
TestInvalid('"Unterminated string\\\\\\"');
-// Test bad JSON that would be good JavaScript (ES5).
+// JavaScript RegExp literals not valid in JSON.
+TestInvalid('/true/');
+// Test bad JSON that would be good JavaScript (ES5).
TestInvalid("{true:42}");
TestInvalid("{false:42}");
TestInvalid("{null:42}");
@@ -211,7 +213,6 @@ TestInvalid("{0:42}");
TestInvalid("{-1:42}");
// Test for trailing garbage detection.
-
TestInvalid('42 px');
TestInvalid('42 .2');
TestInvalid('42 2');
@@ -277,8 +278,35 @@ assertEquals('{\n "a": "b",\n "c": "d"\n}',
JSON.stringify({a:"b",c:"d"}, null, 1));
assertEquals('{"y":6,"x":5}', JSON.stringify({x:5,y:6}, ['y', 'x']));
+// The gap is capped at ten characters if specified as a string.
+assertEquals('{\n "a": "b",\n "c": "d"\n}',
+ JSON.stringify({a:"b",c:"d"}, null,
+ " /*characters after 10th*/"));
+
+// The gap is capped at ten characters if specified as a number.
+assertEquals('{\n "a": "b",\n "c": "d"\n}',
+ JSON.stringify({a:"b",c:"d"}, null, 15));
+
+// Replaced wrapped primitives are unwrapped.
+function newx(k, v) { return (k == "x") ? new v(42) : v; }
+assertEquals('{"x":"42"}', JSON.stringify({x: String}, newx));
+assertEquals('{"x":42}', JSON.stringify({x: Number}, newx));
+assertEquals('{"x":true}', JSON.stringify({x: Boolean}, newx));
+
assertEquals(undefined, JSON.stringify(undefined));
assertEquals(undefined, JSON.stringify(function () { }));
+// Arrays with missing, undefined or function elements have those elements
+// replaced by null.
+assertEquals("[null,null,null]",
+ JSON.stringify([undefined,,function(){}]));
+
+// Objects with undefined or function properties (including replaced properties)
+// have those properties ignored.
+assertEquals('{}',
+ JSON.stringify({a: undefined, b: function(){}, c: 42, d: 42},
+ function(k, v) { if (k == "c") return undefined;
+ if (k == "d") return function(){};
+ return v; }));
TestInvalid('1); throw "foo"; (1');
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index f1752b9f5..7cb2416f0 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -64,3 +64,7 @@ array-splice: PASS || TIMEOUT
# Skip long running test in debug mode on ARM.
string-indexof-2: PASS, SKIP if $mode == debug
+[ $arch == mips ]
+
+# Skip all tests on MIPS.
+*: SKIP
diff --git a/deps/v8/test/mjsunit/object-define-properties.js b/deps/v8/test/mjsunit/object-define-properties.js
new file mode 100644
index 000000000..6b3725b39
--- /dev/null
+++ b/deps/v8/test/mjsunit/object-define-properties.js
@@ -0,0 +1,56 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests the Object.defineProperties method - ES 15.2.3.7
+// Note that the internal DefineOwnProperty method is tested through
+// object-define-property.js; this file only contains tests specific to
+// Object.defineProperties. Also note that object-create.js contains
+// a range of indirect tests of this method, since Object.create uses
+// Object.defineProperties as a step in setting up the object.
+
+// Try defining with null as descriptor:
+try {
+ Object.defineProperties({}, null);
+} catch(e) {
+ assertTrue(/null to object/.test(e));
+}
+
+// Try defining with null as object
+try {
+ Object.defineProperties(null, {});
+} catch(e) {
+ assertTrue(/called on non-object/.test(e));
+}
+
+
+var desc = {foo: {value: 10}, bar: {get: function() {return 42; }}};
+var obj = {};
+// Check that we actually get the object back as the return value.
+var x = Object.defineProperties(obj, desc);
+
+assertEquals(x.foo, 10);
+assertEquals(x.bar, 42);
diff --git a/deps/v8/test/mjsunit/object-define-property.js b/deps/v8/test/mjsunit/object-define-property.js
new file mode 100644
index 000000000..43b1c7f09
--- /dev/null
+++ b/deps/v8/test/mjsunit/object-define-property.js
@@ -0,0 +1,499 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests the Object.defineProperty method - ES 15.2.3.6
+
+// Flags: --allow-natives-syntax
+
+// Check that an exception is thrown when null is passed as object.
+try {
+ Object.defineProperty(null, null, null);
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/called on non-object/.test(e));
+}
+
+// Check that an exception is thrown when undefined is passed as object.
+try {
+ Object.defineProperty(undefined, undefined, undefined);
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/called on non-object/.test(e));
+}
+
+// Check that an exception is thrown when non-object is passed as object.
+try {
+ Object.defineProperty(0, "foo", undefined);
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/called on non-object/.test(e));
+}
+
+// Object
+var obj1 = {};
+
+// Values
+var val1 = 0;
+var val2 = 0;
+var val3 = 0;
+
+// Descriptors
+var emptyDesc = {};
+
+var accessorConfigurable = {
+ set: function() { val1++; },
+ get: function() { return val1; },
+ configurable: true
+};
+
+var accessorNoConfigurable = {
+ set: function() { val2++; },
+ get: function() { return val2; },
+ configurable: false
+};
+
+var accessorOnlySet = {
+ set: function() { val3++; },
+ configurable: true
+};
+
+var accessorOnlyGet = {
+ get: function() { return val3; },
+ configurable: true
+};
+
+var accessorDefault = {set: function(){} };
+
+var dataConfigurable = { value: 1000, configurable: true };
+
+var dataNoConfigurable = { value: 2000, configurable: false };
+
+var dataWritable = { value: 3000, writable: true};
+
+
+// Check that we can't add a property with an undefined descriptor.
+try {
+ Object.defineProperty(obj1, "foo", undefined);
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/must be an object/.test(e));
+}
+
+// Make sure that we can add a property with an empty descriptor and
+// that it has the default descriptor values.
+Object.defineProperty(obj1, "foo", emptyDesc);
+
+// foo should be undefined as it has no get, set or value
+assertEquals(undefined, obj1.foo);
+
+// We should, however, be able to retrieve the property descriptor, which should
+// have all default values (according to 8.6.1).
+var desc = Object.getOwnPropertyDescriptor(obj1, "foo");
+assertFalse(desc.configurable);
+assertFalse(desc.enumerable);
+assertFalse(desc.writable);
+assertEquals(desc.get, undefined);
+assertEquals(desc.set, undefined);
+assertEquals(desc.value, undefined);
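+
+// For contrast: plain assignment creates a property whose attributes all
+// default to true - the opposite of the empty-descriptor defaults asserted
+// above. (A small sketch; the names here are arbitrary.)
+var contrastObj = {};
+contrastObj.viaAssign = 1;
+var assignDesc = Object.getOwnPropertyDescriptor(contrastObj, "viaAssign");
+assertTrue(assignDesc.configurable);
+assertTrue(assignDesc.enumerable);
+assertTrue(assignDesc.writable);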
+
+// Make sure that getOwnPropertyDescriptor does not return a descriptor
+// with default values if called with a non-existing property (otherwise
+// the test above is invalid).
+desc = Object.getOwnPropertyDescriptor(obj1, "bar");
+assertEquals(desc, undefined);
+
+// Make sure that foo can't be reset (as configurable is false).
+try {
+ Object.defineProperty(obj1, "foo", accessorConfigurable);
+} catch (e) {
+ assertTrue(/Cannot redefine property/.test(e));
+}
+
+
+// Accessor properties
+
+Object.defineProperty(obj1, "bar", accessorConfigurable);
+desc = Object.getOwnPropertyDescriptor(obj1, "bar");
+assertTrue(desc.configurable);
+assertFalse(desc.enumerable);
+assertEquals(desc.writable, undefined);
+assertEquals(desc.get, accessorConfigurable.get);
+assertEquals(desc.set, accessorConfigurable.set);
+assertEquals(desc.value, undefined);
+assertEquals(1, obj1.bar = 1);
+assertEquals(1, val1);
+assertEquals(1, obj1.bar = 1);
+assertEquals(2, val1);
+assertEquals(2, obj1.bar);
+
+// Redefine bar with the non-configurable descriptor.
+Object.defineProperty(obj1, "bar", accessorNoConfigurable);
+desc = Object.getOwnPropertyDescriptor(obj1, "bar");
+assertFalse(desc.configurable);
+assertFalse(desc.enumerable);
+assertEquals(desc.writable, undefined);
+assertEquals(desc.get, accessorNoConfigurable.get);
+assertEquals(desc.set, accessorNoConfigurable.set);
+assertEquals(desc.value, undefined);
+assertEquals(1, obj1.bar = 1);
+assertEquals(2, val1);
+assertEquals(1, val2);
+assertEquals(1, obj1.bar = 1);
+assertEquals(2, val1);
+assertEquals(2, val2);
+assertEquals(2, obj1.bar);
+
+// Try to redefine bar again - should fail as configurable is false.
+try {
+ Object.defineProperty(obj1, "bar", accessorConfigurable);
+ assertTrue(false);
+} catch(e) {
+ assertTrue(/Cannot redefine property/.test(e));
+}
+
+// Try to redefine bar again using the data descriptor - should fail.
+try {
+ Object.defineProperty(obj1, "bar", dataConfigurable);
+ assertTrue(false);
+} catch(e) {
+ assertTrue(/Cannot redefine property/.test(e));
+}
+
+// Redefine using same descriptor - should succeed.
+Object.defineProperty(obj1, "bar", accessorNoConfigurable);
+desc = Object.getOwnPropertyDescriptor(obj1, "bar");
+assertFalse(desc.configurable);
+assertFalse(desc.enumerable);
+assertEquals(desc.writable, undefined);
+assertEquals(desc.get, accessorNoConfigurable.get);
+assertEquals(desc.set, accessorNoConfigurable.set);
+assertEquals(desc.value, undefined);
+assertEquals(1, obj1.bar = 1);
+assertEquals(2, val1);
+assertEquals(3, val2);
+assertEquals(1, obj1.bar = 1);
+assertEquals(2, val1);
+assertEquals(4, val2);
+assertEquals(4, obj1.bar);
+
+// Define an accessor that has only a setter
+Object.defineProperty(obj1, "setOnly", accessorOnlySet);
+desc = Object.getOwnPropertyDescriptor(obj1, "setOnly");
+assertTrue(desc.configurable);
+assertFalse(desc.enumerable);
+assertEquals(desc.set, accessorOnlySet.set);
+assertEquals(desc.writable, undefined);
+assertEquals(desc.value, undefined);
+assertEquals(desc.get, undefined);
+assertEquals(1, obj1.setOnly = 1);
+assertEquals(1, val3);
+
+// Add a getter - should not touch the setter
+Object.defineProperty(obj1, "setOnly", accessorOnlyGet);
+desc = Object.getOwnPropertyDescriptor(obj1, "setOnly");
+assertTrue(desc.configurable);
+assertFalse(desc.enumerable);
+assertEquals(desc.get, accessorOnlyGet.get);
+assertEquals(desc.set, accessorOnlySet.set);
+assertEquals(desc.writable, undefined);
+assertEquals(desc.value, undefined);
+assertEquals(1, obj1.setOnly = 1);
+assertEquals(2, val3);
+
+// The above should also work if redefining just a getter or setter on
+// an existing property with both a getter and a setter.
+Object.defineProperty(obj1, "both", accessorConfigurable);
+
+Object.defineProperty(obj1, "both", accessorOnlySet);
+desc = Object.getOwnPropertyDescriptor(obj1, "both");
+assertTrue(desc.configurable);
+assertFalse(desc.enumerable);
+assertEquals(desc.set, accessorOnlySet.set);
+assertEquals(desc.get, accessorConfigurable.get);
+assertEquals(desc.writable, undefined);
+assertEquals(desc.value, undefined);
+assertEquals(1, obj1.both = 1);
+assertEquals(3, val3);
+
+
+// Data properties
+
+Object.defineProperty(obj1, "foobar", dataConfigurable);
+desc = Object.getOwnPropertyDescriptor(obj1, "foobar");
+assertEquals(obj1.foobar, 1000);
+assertEquals(desc.value, 1000);
+assertTrue(desc.configurable);
+assertFalse(desc.writable);
+assertFalse(desc.enumerable);
+assertEquals(desc.get, undefined);
+assertEquals(desc.set, undefined);
+// Try writing to the non-writable property - should remain 1000
+obj1.foobar = 1001;
+assertEquals(obj1.foobar, 1000);
+
+
+// Redefine to writable descriptor - now writing to foobar should be allowed
+Object.defineProperty(obj1, "foobar", dataWritable);
+desc = Object.getOwnPropertyDescriptor(obj1, "foobar");
+assertEquals(obj1.foobar, 3000);
+assertEquals(desc.value, 3000);
+// Note that since dataWritable does not specify configurable, the configurable
+// setting of the existing property (in this case true) is kept.
+assertTrue(desc.configurable);
+assertTrue(desc.writable);
+assertFalse(desc.enumerable);
+assertEquals(desc.get, undefined);
+assertEquals(desc.set, undefined);
+// Writing to the property should now be allowed
+obj1.foobar = 1001;
+assertEquals(obj1.foobar, 1001);
+
+
+// Redefine with non configurable data property.
+Object.defineProperty(obj1, "foobar", dataNoConfigurable);
+desc = Object.getOwnPropertyDescriptor(obj1, "foobar");
+assertEquals(obj1.foobar, 2000);
+assertEquals(desc.value, 2000);
+assertFalse(desc.configurable);
+assertFalse(desc.writable);
+assertFalse(desc.enumerable);
+assertEquals(desc.get, undefined);
+assertEquals(desc.set, undefined);
+
+// Try to redefine again - should fail because configurable is now false.
+try {
+ Object.defineProperty(obj1, "foobar", dataConfigurable);
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/Cannot redefine property/.test(e));
+}
+
+// Try to redefine again with an accessor property - should also fail.
+try {
+  Object.defineProperty(obj1, "foobar", accessorConfigurable);
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/Cannot redefine property/.test(e));
+}
+
+
+// Redefine with the same descriptor - should succeed (step 6).
+Object.defineProperty(obj1, "foobar", dataNoConfigurable);
+desc = Object.getOwnPropertyDescriptor(obj1, "foobar");
+assertEquals(obj1.foobar, 2000);
+assertEquals(desc.value, 2000);
+assertFalse(desc.configurable);
+assertFalse(desc.writable);
+assertFalse(desc.enumerable);
+assertEquals(desc.get, undefined);
+assertEquals(desc.set, undefined);
+
+
+// New object
+var obj2 = {};
+
+// Make accessor - redefine to data
+Object.defineProperty(obj2, "foo", accessorConfigurable);
+
+// Redefine to data property
+Object.defineProperty(obj2, "foo", dataConfigurable);
+desc = Object.getOwnPropertyDescriptor(obj2, "foo");
+assertEquals(obj2.foo, 1000);
+assertEquals(desc.value, 1000);
+assertTrue(desc.configurable);
+assertFalse(desc.writable);
+assertFalse(desc.enumerable);
+assertEquals(desc.get, undefined);
+assertEquals(desc.set, undefined);
+
+
+// Redefine back to accessor
+Object.defineProperty(obj2, "foo", accessorConfigurable);
+desc = Object.getOwnPropertyDescriptor(obj2, "foo");
+assertTrue(desc.configurable);
+assertFalse(desc.enumerable);
+assertEquals(desc.writable, undefined);
+assertEquals(desc.get, accessorConfigurable.get);
+assertEquals(desc.set, accessorConfigurable.set);
+assertEquals(desc.value, undefined);
+assertEquals(1, obj2.foo = 1);
+assertEquals(3, val1);
+assertEquals(4, val2);
+assertEquals(3, obj2.foo);
+
+// Make data - redefine to accessor
+Object.defineProperty(obj2, "bar", dataConfigurable);
+
+// Redefine to accessor property
+Object.defineProperty(obj2, "bar", accessorConfigurable);
+desc = Object.getOwnPropertyDescriptor(obj2, "bar");
+assertTrue(desc.configurable);
+assertFalse(desc.enumerable);
+assertEquals(desc.writable, undefined);
+assertEquals(desc.get, accessorConfigurable.get);
+assertEquals(desc.set, accessorConfigurable.set);
+assertEquals(desc.value, undefined);
+assertEquals(1, obj2.bar = 1);
+assertEquals(4, val1);
+assertEquals(4, val2);
+assertEquals(4, obj2.foo);
+
+// Redefine back to data property
+Object.defineProperty(obj2, "bar", dataConfigurable);
+desc = Object.getOwnPropertyDescriptor(obj2, "bar");
+assertEquals(obj2.bar, 1000);
+assertEquals(desc.value, 1000);
+assertTrue(desc.configurable);
+assertFalse(desc.writable);
+assertFalse(desc.enumerable);
+assertEquals(desc.get, undefined);
+assertEquals(desc.set, undefined);
+
+
+// Redefinition of an accessor defined using __defineGetter__ and
+// __defineSetter__
+function get() { return this.x; }
+function set(x) { this.x = x; }
+
+var obj3 = {x:1000};
+obj3.__defineGetter__("foo", get);
+obj3.__defineSetter__("foo", set);
+
+desc = Object.getOwnPropertyDescriptor(obj3, "foo");
+assertTrue(desc.configurable);
+assertTrue(desc.enumerable);
+assertEquals(desc.writable, undefined);
+assertEquals(desc.get, get);
+assertEquals(desc.set, set);
+assertEquals(desc.value, undefined);
+assertEquals(1, obj3.foo = 1);
+assertEquals(1, obj3.x);
+assertEquals(1, obj3.foo);
+
+// Redefine to accessor property (non configurable) - note that enumerable
+// which we do not redefine should remain the same (true).
+Object.defineProperty(obj3, "foo", accessorNoConfigurable);
+desc = Object.getOwnPropertyDescriptor(obj3, "foo");
+assertFalse(desc.configurable);
+assertTrue(desc.enumerable);
+assertEquals(desc.writable, undefined);
+assertEquals(desc.get, accessorNoConfigurable.get);
+assertEquals(desc.set, accessorNoConfigurable.set);
+assertEquals(desc.value, undefined);
+assertEquals(1, obj3.foo = 1);
+assertEquals(5, val2);
+assertEquals(5, obj3.foo);
+
+
+obj3.__defineGetter__("bar", get);
+obj3.__defineSetter__("bar", set);
+
+
+// Redefine back to data property
+Object.defineProperty(obj3, "bar", dataConfigurable);
+desc = Object.getOwnPropertyDescriptor(obj3, "bar");
+assertEquals(obj3.bar, 1000);
+assertEquals(desc.value, 1000);
+assertTrue(desc.configurable);
+assertFalse(desc.writable);
+assertTrue(desc.enumerable);
+assertEquals(desc.get, undefined);
+assertEquals(desc.set, undefined);
+
+
+var obj4 = {};
+var func = function (){return 42;};
+obj4.bar = func;
+assertEquals(42, obj4.bar());
+
+Object.defineProperty(obj4, "bar", accessorConfigurable);
+desc = Object.getOwnPropertyDescriptor(obj4, "bar");
+assertTrue(desc.configurable);
+assertTrue(desc.enumerable);
+assertEquals(desc.writable, undefined);
+assertEquals(desc.get, accessorConfigurable.get);
+assertEquals(desc.set, accessorConfigurable.set);
+assertEquals(desc.value, undefined);
+assertEquals(1, obj4.bar = 1);
+assertEquals(5, val1);
+assertEquals(5, obj4.bar);
+
+// Make sure an error is thrown when trying to call the redefined function.
+try {
+ obj4.bar();
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/is not a function/.test(e));
+}
+
+
+// Test runtime calls to DefineOrRedefineDataProperty and
+// DefineOrRedefineAccessorProperty - make sure we don't
+// crash
+try {
+ %DefineOrRedefineAccessorProperty(0, 0, 0, 0, 0);
+} catch (e) {
+ assertTrue(/illegal access/.test(e));
+}
+
+try {
+ %DefineOrRedefineDataProperty(0, 0, 0, 0);
+} catch (e) {
+ assertTrue(/illegal access/.test(e));
+}
+
+try {
+ %DefineOrRedefineDataProperty(null, null, null, null);
+} catch (e) {
+ assertTrue(/illegal access/.test(e));
+}
+
+try {
+ %DefineOrRedefineAccessorProperty(null, null, null, null, null);
+} catch (e) {
+ assertTrue(/illegal access/.test(e));
+}
+
+try {
+ %DefineOrRedefineDataProperty({}, null, null, null);
+} catch (e) {
+ assertTrue(/illegal access/.test(e));
+}
+
+// Defining properties on null should fail even when the other
+// arguments are allowed values.
+try {
+ %DefineOrRedefineAccessorProperty(null, 'foo', 0, func, 0);
+} catch (e) {
+ assertTrue(/illegal access/.test(e));
+}
+
+try {
+ %DefineOrRedefineDataProperty(null, 'foo', 0, 0);
+} catch (e) {
+ assertTrue(/illegal access/.test(e));
+}
diff --git a/deps/v8/test/mjsunit/object-get-own-property-names.js b/deps/v8/test/mjsunit/object-get-own-property-names.js
index f52cee2f9..33aa85ef1 100644
--- a/deps/v8/test/mjsunit/object-get-own-property-names.js
+++ b/deps/v8/test/mjsunit/object-get-own-property-names.js
@@ -57,6 +57,8 @@ propertyNames.sort();
assertEquals(3, propertyNames.length);
assertEquals("0", propertyNames[0]);
assertEquals("1", propertyNames[1]);
+assertEquals("string", typeof propertyNames[0]);
+assertEquals("string", typeof propertyNames[1]);
assertEquals("length", propertyNames[2]);
// Check that no proto properties are returned.
diff --git a/deps/v8/test/mjsunit/regress/regress-603.js b/deps/v8/test/mjsunit/regress/regress-603.js
new file mode 100644
index 000000000..7d4c32292
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-603.js
@@ -0,0 +1,49 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Calling non-objects directly or via Function.prototype.call should
+// not mess up the stack.
+// http://code.google.com/p/v8/issues/detail?id=603
+
+function test0() {
+ var re = /b../;
+ return re('abcdefghijklm') + 'z';
+}
+assertEquals('bcdz', test0());
+
+var re1 = /c../;
+re1.call = Function.prototype.call;
+var test1 = re1.call(null, 'abcdefghijklm') + 'z';
+assertEquals('cdez', test1);
+
+var re2 = /d../;
+var test2 = Function.prototype.call.call(re2, null, 'abcdefghijklm') + 'z';
+assertEquals('defz', test2);
+
+var re3 = /e../;
+var test3 = Function.prototype.call.apply(re3, [null, 'abcdefghijklm']) + 'z';
+assertEquals('efgz', test3);
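The expected strings rely on V8's non-standard convenience of making regular expressions callable like their exec method, and on exec's result array stringifying to the matched text. A sketch of the equivalent explicit call:

var re = /b../;
var m = re.exec('abcdefghijklm');  // ['bcd']
// String(m) === 'bcd', so m + 'z' === 'bcdz' - the value test0 asserts.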
diff --git a/deps/v8/test/mjsunit/regress/regress-612.js b/deps/v8/test/mjsunit/regress/regress-612.js
new file mode 100644
index 000000000..aee6d530b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-612.js
@@ -0,0 +1,44 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests the interaction between __defineGetter__/__defineSetter__ and the
+// fast and slow modes of objects under the series-of-assignments optimization.
+// (See http://code.google.com/p/v8/issues/detail?id=612)
+
+obj = {};
+
+// Define a getter, which currently moves the object into slow mode.
+obj.__defineGetter__('foobar', function() { return 42; });
+
+// Starts initialization block mode and turns the object into slow mode.
+obj.a = 1;
+obj.b = 2;
+obj.c = 3;
+// Now the object is turned into fast mode, but it still has the getter
+// defined above...
+
+// Now the assert is triggered.
+obj.__defineGetter__('foobar', function() { return 42; });
diff --git a/deps/v8/test/mjsunit/setter-on-constructor-prototype.js b/deps/v8/test/mjsunit/setter-on-constructor-prototype.js
new file mode 100644
index 000000000..d5718f9c9
--- /dev/null
+++ b/deps/v8/test/mjsunit/setter-on-constructor-prototype.js
@@ -0,0 +1,111 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function RunTest(ensure_fast_case) {
+ function C1() {
+ this.x = 23;
+ };
+ C1.prototype = { set x(value) { this.y = 23; } };
+ if (ensure_fast_case) {
+ %ToFastProperties(C1.prototype);
+ }
+
+ for (var i = 0; i < 10; i++) {
+ var c1 = new C1();
+ assertEquals("undefined", typeof c1.x);
+ assertEquals(23, c1.y);
+ }
+
+
+ function C2() {
+ this.x = 23;
+ };
+ C2.prototype = { };
+ C2.prototype.__proto__ = { set x(value) { this.y = 23; } };
+ if (ensure_fast_case) {
+    %ToFastProperties(C2.prototype.__proto__);
+ }
+
+ for (var i = 0; i < 10; i++) {
+ var c2 = new C2();
+ assertEquals("undefined", typeof c2.x);
+ assertEquals(23, c2.y);
+ }
+
+
+ function C3() {
+ this.x = 23;
+ };
+ C3.prototype = { };
+ C3.prototype.__defineSetter__('x', function(value) { this.y = 23; });
+ if (ensure_fast_case) {
+ %ToFastProperties(C3.prototype);
+ }
+
+ for (var i = 0; i < 10; i++) {
+ var c3 = new C3();
+ assertEquals("undefined", typeof c3.x);
+ assertEquals(23, c3.y);
+ }
+
+
+ function C4() {
+ this.x = 23;
+ };
+ C4.prototype = { };
+ C4.prototype.__proto__ = { };
+ C4.prototype.__proto__.__defineSetter__('x', function(value) { this.y = 23; });
+ if (ensure_fast_case) {
+ %ToFastProperties(C4.prototype.__proto__);
+ }
+
+ for (var i = 0; i < 10; i++) {
+ var c4 = new C4();
+ assertEquals("undefined", typeof c4.x);
+ assertEquals(23, c4.y);
+ }
+
+
+ function D() {
+ this.x = 23;
+ };
+ D.prototype = 1;
+ if (ensure_fast_case) {
+ %ToFastProperties(D.prototype);
+ }
+
+ for (var i = 0; i < 10; i++) {
+ var d = new D();
+ assertEquals(23, d.x);
+ assertEquals("undefined", typeof d.y);
+ }
+}
+
+RunTest(false);
+RunTest(true);
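What these cases pin down: the assignment this.x = 23 in the constructor finds the inherited setter before it would create an own property, so the setter runs and no own x ever appears. A minimal standalone sketch of C1's shape:

function C() {
  this.x = 23;  // dispatches to the inherited setter; no own 'x' is created
}
C.prototype = { set x(value) { this.y = 23; } };

var c = new C();
// typeof c.x is 'undefined' (the accessor has no getter)
// c.y is 23 (stored by the setter)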
diff --git a/deps/v8/test/mjsunit/substr.js b/deps/v8/test/mjsunit/substr.js
index 8c276f941..f69a9c045 100644..100755
--- a/deps/v8/test/mjsunit/substr.js
+++ b/deps/v8/test/mjsunit/substr.js
@@ -44,9 +44,6 @@ assertEquals(s1, s.substr(1.1));
assertEquals(s1, s.substr({ valueOf: function() { return 1; } }));
assertEquals(s1, s.substr({ toString: function() { return '1'; } }));
-for (var i = 0; i < s.length; i++)
- for (var j = i; j < s.length + 5; j++)
- assertEquals(s.substring(i, j), s.substr(i, j - i));
assertEquals(s.substring(s.length - 1), s.substr(-1));
assertEquals(s.substring(s.length - 1), s.substr(-1.2));
@@ -63,3 +60,78 @@ assertEquals('abcdefghijklmn', s.substr(0, void 0)); // kjs and v8
assertEquals('', s.substr(0, null));
assertEquals(s, s.substr(0, String(s.length)));
assertEquals('a', s.substr(0, true));
+
+
+// Test substrings of different lengths and alignments.
+// First ASCII.
+var x = "ASCII";
+for (var i = 0; i < 25; i++) {
+ x += (i >> 4).toString(16) + (i & 0x0f).toString(16);
+}
+/x/.exec(x); // Try to force a flatten.
+for (var i = 5; i < 25; i++) {
+ for (var j = 0; j < 25; j++) {
+ var z = x.substring(i, i+j);
+ var w = Math.random() * 42; // Allocate something new in new-space.
+ assertEquals(j, z.length);
+ for (var k = 0; k < j; k++) {
+ assertEquals(x.charAt(i+k), z.charAt(k));
+ }
+ }
+}
+
+
+// Then two-byte strings.
+x = "UC16\u2028"; // A non-ASCII char forces a two-byte string.
+for (var i = 0; i < 25; i++) {
+ x += (i >> 4).toString(16) + (i & 0x0f).toString(16);
+}
+/x/.exec(x); // Try to force a flatten.
+for (var i = 5; i < 25; i++) {
+ for (var j = 0; j < 25; j++) {
+ var z = x.substring(i, i + j);
+ var w = Math.random() * 42; // Allocate something new in new-space.
+ assertEquals(j, z.length);
+ for (var k = 0; k < j; k++) {
+ assertEquals(x.charAt(i+k), z.charAt(k));
+ }
+ }
+}
+
+
+// Keep creating strings to force allocation failure on substring creation.
+var x = "0123456789ABCDEF";
+x += x; // 2^5
+x += x;
+x += x;
+x += x;
+x += x;
+x += x; // 2^10
+x += x;
+x += x;
+var xl = x.length;
+var cache = [];
+for (var i = 0; i < 10000; i++) {
+ var z = x.substring(i % xl);
+ assertEquals(xl - (i % xl), z.length);
+ cache.push(z);
+}
+
+
+// Same with two-byte strings
+var x = "\u2028123456789ABCDEF";
+x += x; // 2^5
+x += x;
+x += x;
+x += x;
+x += x;
+x += x; // 2^10
+x += x;
+x += x;
+var xl = x.length;
+var cache = [];
+for (var i = 0; i < 10000; i++) {
+ var z = x.substring(i % xl);
+ assertEquals(xl - (i % xl), z.length);
+ cache.push(z);
+}
diff --git a/deps/v8/test/mjsunit/tools/logreader.js b/deps/v8/test/mjsunit/tools/logreader.js
index 8b7478951..485990eaa 100644
--- a/deps/v8/test/mjsunit/tools/logreader.js
+++ b/deps/v8/test/mjsunit/tools/logreader.js
@@ -80,19 +80,3 @@
assertEquals('bbbbaaaa', reader.expandBackRef_('bbbb#2:4'));
assertEquals('"#1:1"', reader.expandBackRef_('"#1:1"'));
})();
-
-
-// See http://code.google.com/p/v8/issues/detail?id=420
-(function testReadingTruncatedLog() {
- // Having an incorrect event in the middle of a log should throw an exception.
- var reader1 = new devtools.profiler.LogReader({});
- assertThrows(function() {
- reader1.processLogChunk('alias,a,b\nxxxx\nalias,c,d\n');
- });
-
- // But having it as the last record should not.
- var reader2 = new devtools.profiler.LogReader({});
- assertDoesNotThrow(function() {
- reader2.processLogChunk('alias,a,b\nalias,c,d\nxxxx');
- });
-})();
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor.js b/deps/v8/test/mjsunit/tools/tickprocessor.js
index abcde897e..30b0ec23f 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor.js
+++ b/deps/v8/test/mjsunit/tools/tickprocessor.js
@@ -379,9 +379,7 @@ function driveTickProcessorTest(
var tp = new TickProcessor(
new CppEntriesProviderMock(), separateIc, ignoreUnknown, stateFilter);
var pm = new PrintMonitor(testsPath + refOutput);
- tp.processLogFile(testsPath + logInput);
- // Hack file name to avoid dealing with platform specifics.
- tp.lastLogFileName_ = 'v8.log';
+ tp.processLogFileInTest(testsPath + logInput);
tp.printStatistics();
pm.finish();
};
diff --git a/deps/v8/test/mjsunit/typeof.js b/deps/v8/test/mjsunit/typeof.js
index b460fbba9..15ab7bf34 100644
--- a/deps/v8/test/mjsunit/typeof.js
+++ b/deps/v8/test/mjsunit/typeof.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --nofast-compiler
+// Flags: --nofull-compiler
// The type of a regular expression should be 'function', including in
// the context of string equality comparisons.
diff --git a/deps/v8/test/sputnik/sputnik.status b/deps/v8/test/sputnik/sputnik.status
index 16a44c51a..e5b9e2051 100644
--- a/deps/v8/test/sputnik/sputnik.status
+++ b/deps/v8/test/sputnik/sputnik.status
@@ -316,3 +316,8 @@ S15.9.5.9_A1_T2: FAIL_OK
S11.4.3_A3.6: FAIL_OK
S15.10.7_A3_T2: FAIL_OK
S15.10.7_A3_T1: FAIL_OK
+
+[ $arch == mips ]
+
+# Skip all tests on MIPS.
+*: SKIP
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index f2d1b98ee..9fea73ce8 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -308,6 +308,8 @@
'../../src/jsregexp.h',
'../../src/list-inl.h',
'../../src/list.h',
+ '../../src/liveedit.cc',
+ '../../src/liveedit.h',
'../../src/log-inl.h',
'../../src/log-utils.cc',
'../../src/log-utils.h',
@@ -320,6 +322,7 @@
'../../src/messages.cc',
'../../src/messages.h',
'../../src/natives.h',
+ '../../src/number-info.h',
'../../src/objects-debug.cc',
'../../src/objects-inl.h',
'../../src/objects.cc',
diff --git a/deps/v8/tools/linux-tick-processor b/deps/v8/tools/linux-tick-processor
index ca1c72126..17157050d 100644
--- a/deps/v8/tools/linux-tick-processor
+++ b/deps/v8/tools/linux-tick-processor
@@ -16,8 +16,17 @@ else
[ -x $d8_exec ] || scons -j4 -C $D8_PATH -Y $tools_path/.. d8
fi
+# Find the name of the log file to process; it must not start with a dash.
+log_file="v8.log"
+for arg in "$@"
+do
+ if [[ "${arg}" != -* ]]; then
+ log_file=${arg}
+ fi
+done
+
# nm spits out 'no symbols found' messages to stderr.
-$d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
+cat $log_file | $d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
$tools_path/csvparser.js $tools_path/consarray.js \
$tools_path/profile.js $tools_path/profile_view.js \
$tools_path/logreader.js $tools_path/tickprocessor.js \
diff --git a/deps/v8/tools/logreader.js b/deps/v8/tools/logreader.js
index 20a1f5444..b2aca73d2 100644
--- a/deps/v8/tools/logreader.js
+++ b/deps/v8/tools/logreader.js
@@ -76,6 +76,18 @@ devtools.profiler.LogReader = function(dispatchTable) {
* @type {Array.<string>}
*/
this.backRefs_ = [];
+
+ /**
+ * Current line.
+ * @type {number}
+ */
+ this.lineNum_ = 0;
+
+ /**
+ * CSV lines parser.
+ * @type {devtools.profiler.CsvParser}
+ */
+ this.csvParser_ = new devtools.profiler.CsvParser();
};
@@ -136,6 +148,16 @@ devtools.profiler.LogReader.prototype.processLogChunk = function(chunk) {
/**
+ * Processes a line of V8 profiler event log.
+ *
+ * @param {string} line A line of log.
+ */
+devtools.profiler.LogReader.prototype.processLogLine = function(line) {
+ this.processLog_([line]);
+};
+
+
+/**
* Processes stack record.
*
* @param {number} pc Program counter.
@@ -280,25 +302,20 @@ devtools.profiler.LogReader.prototype.processAlias_ = function(
* @private
*/
devtools.profiler.LogReader.prototype.processLog_ = function(lines) {
- var csvParser = new devtools.profiler.CsvParser();
- try {
- for (var i = 0, n = lines.length; i < n; ++i) {
- var line = lines[i];
- if (!line) {
- continue;
- }
+ for (var i = 0, n = lines.length; i < n; ++i, ++this.lineNum_) {
+ var line = lines[i];
+ if (!line) {
+ continue;
+ }
+ try {
if (line.charAt(0) == '#' ||
line.substr(0, line.indexOf(',')) in this.backRefsCommands_) {
line = this.expandBackRef_(line);
}
- var fields = csvParser.parseLine(line);
+ var fields = this.csvParser_.parseLine(line);
this.dispatchLogRow_(fields);
- }
- } catch (e) {
- // An error on the last line is acceptable since log file can be truncated.
- if (i < n - 1) {
- this.printError('line ' + (i + 1) + ': ' + (e.message || e));
- throw e;
+ } catch (e) {
+ this.printError('line ' + (this.lineNum_ + 1) + ': ' + (e.message || e));
}
}
};
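With processLogLine in place, a driver can feed the reader one line at a time instead of buffering a whole chunk, and a bad line is now logged with its line number and skipped rather than aborting the run. A usage sketch (the empty dispatch table is illustrative; readline() is the d8 shell builtin the tick processor uses):

var reader = new devtools.profiler.LogReader({});
var line;
while (line = readline()) {
  reader.processLogLine(line);
}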
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index 35422e2ec..a3e14c3ae 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -67,6 +67,9 @@ function SnapshotLogProcessor() {
processor: this.processCodeMove, backrefs: true },
'code-delete': { parsers: [this.createAddressParser('code')],
processor: this.processCodeDelete, backrefs: true },
+ 'function-creation': null,
+ 'function-move': null,
+ 'function-delete': null,
'snapshot-pos': { parsers: [this.createAddressParser('code'), parseInt],
processor: this.processSnapshotPosition, backrefs: true }});
@@ -259,6 +262,16 @@ TickProcessor.prototype.isJsCode = function(name) {
TickProcessor.prototype.processLogFile = function(fileName) {
this.lastLogFileName_ = fileName;
+ var line;
+ while (line = readline()) {
+ this.processLogLine(line);
+ }
+};
+
+
+TickProcessor.prototype.processLogFileInTest = function(fileName) {
+ // Hack file name to avoid dealing with platform specifics.
+ this.lastLogFileName_ = 'v8.log';
var contents = readFile(fileName);
this.processLogChunk(contents);
};
diff --git a/deps/v8/tools/visual_studio/v8_base.vcproj b/deps/v8/tools/visual_studio/v8_base.vcproj
index e58e8ff31..fdb6cd0c6 100644
--- a/deps/v8/tools/visual_studio/v8_base.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base.vcproj
@@ -577,6 +577,14 @@
>
</File>
<File
+ RelativePath="..\..\src\liveedit.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\liveedit.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\log.cc"
>
</File>
@@ -633,6 +641,10 @@
>
</File>
<File
+ RelativePath="..\..\src\number-info.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\objects-debug.cc"
>
<FileConfiguration
diff --git a/deps/v8/tools/visual_studio/v8_base_arm.vcproj b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
index 4b37b538c..2602be455 100644
--- a/deps/v8/tools/visual_studio/v8_base_arm.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
@@ -581,6 +581,14 @@
>
</File>
<File
+ RelativePath="..\..\src\liveedit.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\liveedit.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\log.cc"
>
</File>
@@ -637,6 +645,10 @@
>
</File>
<File
+ RelativePath="..\..\src\number-info.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\objects-debug.cc"
>
<FileConfiguration
diff --git a/deps/v8/tools/visual_studio/v8_base_x64.vcproj b/deps/v8/tools/visual_studio/v8_base_x64.vcproj
index b6d5c7d82..d3f55c6a0 100644
--- a/deps/v8/tools/visual_studio/v8_base_x64.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_x64.vcproj
@@ -578,6 +578,14 @@
>
</File>
<File
+ RelativePath="..\..\src\liveedit.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\liveedit.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\log.cc"
>
</File>
@@ -634,6 +642,10 @@
>
</File>
<File
+ RelativePath="..\..\src\number-info.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\objects-debug.cc"
>
<FileConfiguration
diff --git a/deps/v8/tools/windows-tick-processor.bat b/deps/v8/tools/windows-tick-processor.bat
index 6743f68b3..33b1f7705 100644
--- a/deps/v8/tools/windows-tick-processor.bat
+++ b/deps/v8/tools/windows-tick-processor.bat
@@ -2,4 +2,28 @@
SET tools_dir=%~dp0
-%tools_dir%..\d8 %tools_dir%splaytree.js %tools_dir%codemap.js %tools_dir%csvparser.js %tools_dir%consarray.js %tools_dir%profile.js %tools_dir%profile_view.js %tools_dir%logreader.js %tools_dir%tickprocessor.js %tools_dir%tickprocessor-driver.js -- --windows %*
+SET log_file=v8.log
+
+rem Find the name of the log file to process; it must not start with a dash.
+rem We prepend command-line args with a number (in fact, any letter or number)
+rem to cope with empty arguments.
+SET arg1=1%1
+IF NOT %arg1:~0,2% == 1 (IF NOT %arg1:~0,2% == 1- SET log_file=%1)
+SET arg2=2%2
+IF NOT %arg2:~0,2% == 2 (IF NOT %arg2:~0,2% == 2- SET log_file=%2)
+SET arg3=3%3
+IF NOT %arg3:~0,2% == 3 (IF NOT %arg3:~0,2% == 3- SET log_file=%3)
+SET arg4=4%4
+IF NOT %arg4:~0,2% == 4 (IF NOT %arg4:~0,2% == 4- SET log_file=%4)
+SET arg5=5%5
+IF NOT %arg5:~0,2% == 5 (IF NOT %arg5:~0,2% == 5- SET log_file=%5)
+SET arg6=6%6
+IF NOT %arg6:~0,2% == 6 (IF NOT %arg6:~0,2% == 6- SET log_file=%6)
+SET arg7=7%7
+IF NOT %arg7:~0,2% == 7 (IF NOT %arg7:~0,2% == 7- SET log_file=%7)
+SET arg8=8%8
+IF NOT %arg8:~0,2% == 8 (IF NOT %arg8:~0,2% == 8- SET log_file=%8)
+SET arg9=9%9
+IF NOT %arg9:~0,2% == 9 (IF NOT %arg9:~0,2% == 9- SET log_file=%9)
+
+type %log_file% | %tools_dir%..\d8 %tools_dir%splaytree.js %tools_dir%codemap.js %tools_dir%csvparser.js %tools_dir%consarray.js %tools_dir%profile.js %tools_dir%profile_view.js %tools_dir%logreader.js %tools_dir%tickprocessor.js %tools_dir%tickprocessor-driver.js -- --windows %*