author    Ryan Dahl <ry@tinyclouds.org>  2011-02-28 11:29:33 -0800
committer Ryan Dahl <ry@tinyclouds.org>  2011-02-28 11:29:33 -0800
commit    6442cbef2058019d2e53050f432a80e47c08f517
tree      2c27a43f5e40a0d072f32639e74142f2b5860478 /deps
parent    a14bb04c0500567bc0579da50cc5bc3cb559ba20
Upgrade V8 to 3.1.7
Diffstat (limited to 'deps')
-rw-r--r--  deps/v8/ChangeLog | 14
-rw-r--r--  deps/v8/SConstruct | 121
-rwxr-xr-x  deps/v8/src/SConscript | 3
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc | 8
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.h | 2
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 200
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc | 31
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 2
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 2
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 35
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 50
-rw-r--r--  deps/v8/src/array.js | 9
-rw-r--r--  deps/v8/src/assembler.cc | 2
-rw-r--r--  deps/v8/src/assembler.h | 31
-rw-r--r--  deps/v8/src/flag-definitions.h | 5
-rw-r--r--  deps/v8/src/frame-element.h | 4
-rw-r--r--  deps/v8/src/handles-inl.h | 14
-rw-r--r--  deps/v8/src/handles.cc | 6
-rw-r--r--  deps/v8/src/handles.h | 55
-rw-r--r--  deps/v8/src/hydrogen.cc | 322
-rw-r--r--  deps/v8/src/hydrogen.h | 64
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 16
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 28
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc | 2
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.cc | 1
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.h | 2
-rw-r--r--  deps/v8/src/ic.cc | 1
-rw-r--r--  deps/v8/src/objects-inl.h | 4
-rw-r--r--  deps/v8/src/objects.cc | 18
-rw-r--r--  deps/v8/src/runtime.cc | 649
-rw-r--r--  deps/v8/src/spaces.h | 6
-rw-r--r--  deps/v8/src/version.cc | 2
-rw-r--r--  deps/v8/src/virtual-frame-heavy-inl.h | 6
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h | 2
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 24
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 4
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc | 173
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.h | 18
-rw-r--r--  deps/v8/src/x64/codegen-x64-inl.h | 2
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 24
-rw-r--r--  deps/v8/src/x64/codegen-x64.h | 2
-rw-r--r--  deps/v8/src/x64/cpu-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/debug-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc | 146
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc | 12
-rw-r--r--  deps/v8/src/x64/frames-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/frames-x64.h | 2
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 248
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/jump-target-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc | 131
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h | 1
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc | 6
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 27
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 4
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.h | 2
-rw-r--r--  deps/v8/src/x64/simulator-x64.h | 2
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.h | 4
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 51
-rw-r--r--  deps/v8/test/mjsunit/array-concat.js | 44
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1145.js | 54
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1172-bis.js | 37
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1181.js | 54
67 files changed, 1921 insertions(+), 886 deletions(-)
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 1d91fcded..e4b018c83 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,17 @@
+2011-02-28: Version 3.1.7
+
+ Fixed a number of crash bugs.
+
+ Improved Crankshaft for x64 and ARM.
+
+ Fixed implementation of indexOf/lastIndexOf for sparse
+ arrays (http://crbug.com/73940).
+
+ Fixed bug in map space compaction (http://crbug.com/59688).
+
+ Added support for direct accessor getter calls on ARM.
+
+
2011-02-24: Version 3.1.6
Fixed a number of crash bugs.
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index 6b155e459..84707e984 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -27,7 +27,6 @@
import platform
import re
-import subprocess
import sys
import os
from os.path import join, dirname, abspath
@@ -143,9 +142,6 @@ LIBRARY_FLAGS = {
# Use visibility=default to disable this.
'CXXFLAGS': ['-fvisibility=hidden']
},
- 'strictaliasing:off': {
- 'CCFLAGS': ['-fno-strict-aliasing']
- },
'mode:debug': {
'CCFLAGS': ['-g', '-O0'],
'CPPDEFINES': ['ENABLE_DISASSEMBLER', 'DEBUG'],
@@ -656,16 +652,8 @@ def Abort(message):
sys.exit(1)
-def GuessOS(env):
- return utils.GuessOS()
-
-
-def GuessArch(env):
- return utils.GuessArchitecture()
-
-
-def GuessToolchain(env):
- tools = env['TOOLS']
+def GuessToolchain(os):
+ tools = Environment()['TOOLS']
if 'gcc' in tools:
return 'gcc'
elif 'msvc' in tools:
@@ -674,9 +662,7 @@ def GuessToolchain(env):
return None
-def GuessVisibility(env):
- os = env['os']
- toolchain = env['toolchain'];
+def GuessVisibility(os, toolchain):
if (os == 'win32' or os == 'cygwin') and toolchain == 'gcc':
# MinGW / Cygwin can't do it.
return 'default'
@@ -686,35 +672,27 @@ def GuessVisibility(env):
return 'hidden'
-def GuessStrictAliasing(env):
- # There seems to be a problem with gcc 4.5.x
- # see http://code.google.com/p/v8/issues/detail?id=884
- # it can be worked around by disabling strict aliasing
- toolchain = env['toolchain'];
- if toolchain == 'gcc':
- env = Environment(tools=['gcc'])
- version = subprocess.Popen([env['CC'], '-dumpversion'],
- stdout=subprocess.PIPE).communicate()[0]
- if version.find('4.5.') == 0:
- return 'off'
- return 'default'
+OS_GUESS = utils.GuessOS()
+TOOLCHAIN_GUESS = GuessToolchain(OS_GUESS)
+ARCH_GUESS = utils.GuessArchitecture()
+VISIBILITY_GUESS = GuessVisibility(OS_GUESS, TOOLCHAIN_GUESS)
SIMPLE_OPTIONS = {
'toolchain': {
'values': ['gcc', 'msvc'],
- 'guess': GuessToolchain,
- 'help': 'the toolchain to use'
+ 'default': TOOLCHAIN_GUESS,
+ 'help': 'the toolchain to use (%s)' % TOOLCHAIN_GUESS
},
'os': {
'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
- 'guess': GuessOS,
- 'help': 'the os to build for'
+ 'default': OS_GUESS,
+ 'help': 'the os to build for (%s)' % OS_GUESS
},
'arch': {
'values':['arm', 'ia32', 'x64', 'mips'],
- 'guess': GuessArch,
- 'help': 'the architecture to build for'
+ 'default': ARCH_GUESS,
+ 'help': 'the architecture to build for (%s)' % ARCH_GUESS
},
'regexp': {
'values': ['native', 'interpreted'],
@@ -823,15 +801,8 @@ SIMPLE_OPTIONS = {
},
'visibility': {
'values': ['default', 'hidden'],
- 'guess': GuessVisibility,
- 'depends': ['os', 'toolchain'],
- 'help': 'shared library symbol visibility'
- },
- 'strictaliasing': {
- 'values': ['default', 'off'],
- 'guess': GuessStrictAliasing,
- 'depends': ['toolchain'],
- 'help': 'assume strict aliasing while optimizing'
+ 'default': VISIBILITY_GUESS,
+ 'help': 'shared library symbol visibility (%s)' % VISIBILITY_GUESS
},
'pgo': {
'values': ['off', 'instrument', 'optimize'],
@@ -841,26 +812,6 @@ SIMPLE_OPTIONS = {
}
-def AddOption(result, name, option):
- if 'guess' in option:
- # Option has a guess function
- guess = option.get('guess')
- guess_env = Environment(options=result)
- # Check if all options that the guess function depends on are set
- if 'depends' in option:
- for dependency in option.get('depends'):
- if not dependency in guess_env:
- return False
- default = guess(guess_env)
- else:
- # Option has a fixed default
- default = option.get('default')
-
- help = '%s (%s)' % (option.get('help'), ", ".join(option['values']))
- result.Add(name, help, default)
- return True
-
-
def GetOptions():
result = Options()
result.Add('mode', 'compilation mode (debug, release)', 'release')
@@ -868,28 +819,12 @@ def GetOptions():
result.Add('cache', 'directory to use for scons build cache', '')
result.Add('env', 'override environment settings (NAME0:value0,NAME1:value1,...)', '')
result.Add('importenv', 'import environment settings (NAME0,NAME1,...)', '')
- options = SIMPLE_OPTIONS
- while len(options):
- postpone = {}
- for (name, option) in options.iteritems():
- if not AddOption(result, name, option):
- postpone[name] = option
- options = postpone
+ for (name, option) in SIMPLE_OPTIONS.iteritems():
+ help = '%s (%s)' % (name, ", ".join(option['values']))
+ result.Add(name, help, option.get('default'))
return result
-def GetTools(opts):
- env = Environment(options=opts)
- os = env['os']
- toolchain = env['toolchain']
- if os == 'win32' and toolchain == 'gcc':
- return ['mingw']
- elif os == 'win32' and toolchain == 'msvc':
- return ['msvc', 'mslink', 'mslib', 'msvs']
- else:
- return ['default']
-
-
def GetVersionComponents():
MAJOR_VERSION_PATTERN = re.compile(r"#define\s+MAJOR_VERSION\s+(.*)")
MINOR_VERSION_PATTERN = re.compile(r"#define\s+MINOR_VERSION\s+(.*)")
@@ -970,7 +905,7 @@ def VerifyOptions(env):
print env['simulator']
Abort("Option unalignedaccesses only supported for the ARM architecture.")
for (name, option) in SIMPLE_OPTIONS.iteritems():
- if (not name in env):
+ if (not option.get('default')) and (name not in ARGUMENTS):
message = ("A value for option %s must be specified (%s)." %
(name, ", ".join(option['values'])))
Abort(message)
@@ -1098,7 +1033,7 @@ def ParseEnvOverrides(arg, imports):
return overrides
-def BuildSpecific(env, mode, env_overrides, tools):
+def BuildSpecific(env, mode, env_overrides):
options = {'mode': mode}
for option in SIMPLE_OPTIONS:
options[option] = env[option]
@@ -1151,7 +1086,7 @@ def BuildSpecific(env, mode, env_overrides, tools):
(object_files, shell_files, mksnapshot) = env.SConscript(
join('src', 'SConscript'),
build_dir=join('obj', target_id),
- exports='context tools',
+ exports='context',
duplicate=False
)
@@ -1171,21 +1106,21 @@ def BuildSpecific(env, mode, env_overrides, tools):
library = env.SharedLibrary(library_name, object_files, PDB=pdb_name)
context.library_targets.append(library)
- d8_env = Environment(tools=tools)
+ d8_env = Environment()
d8_env.Replace(**context.flags['d8'])
context.ApplyEnvOverrides(d8_env)
shell = d8_env.Program('d8' + suffix, object_files + shell_files)
context.d8_targets.append(shell)
for sample in context.samples:
- sample_env = Environment(tools=tools)
+ sample_env = Environment()
sample_env.Replace(**context.flags['sample'])
sample_env.Prepend(LIBS=[library_name])
context.ApplyEnvOverrides(sample_env)
sample_object = sample_env.SConscript(
join('samples', 'SConscript'),
build_dir=join('obj', 'sample', sample, target_id),
- exports='sample context tools',
+ exports='sample context',
duplicate=False
)
sample_name = sample + suffix
@@ -1198,7 +1133,7 @@ def BuildSpecific(env, mode, env_overrides, tools):
cctest_program = cctest_env.SConscript(
join('test', 'cctest', 'SConscript'),
build_dir=join('obj', 'test', target_id),
- exports='context object_files tools',
+ exports='context object_files',
duplicate=False
)
context.cctest_targets.append(cctest_program)
@@ -1208,9 +1143,7 @@ def BuildSpecific(env, mode, env_overrides, tools):
def Build():
opts = GetOptions()
- tools = GetTools(opts)
- env = Environment(options=opts, tools=tools)
-
+ env = Environment(options=opts)
Help(opts.GenerateHelpText(env))
VerifyOptions(env)
env_overrides = ParseEnvOverrides(env['env'], env['importenv'])
@@ -1224,7 +1157,7 @@ def Build():
d8s = []
modes = SplitList(env['mode'])
for mode in modes:
- context = BuildSpecific(env.Copy(), mode, env_overrides, tools)
+ context = BuildSpecific(env.Copy(), mode, env_overrides)
libraries += context.library_targets
mksnapshots += context.mksnapshot_targets
cctests += context.cctest_targets
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index 598e4af56..34ca91ca6 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -31,7 +31,6 @@ root_dir = dirname(File('SConstruct').rfile().abspath)
sys.path.append(join(root_dir, 'tools'))
import js2c
Import('context')
-Import('tools')
SOURCES = {
@@ -306,7 +305,7 @@ def Abort(message):
def ConfigureObjectFiles():
- env = Environment(tools=tools)
+ env = Environment()
env.Replace(**context.flags['v8'])
context.ApplyEnvOverrides(env)
env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 87fa87df0..7d374eecd 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -1296,6 +1296,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// This stub does not handle the inlined cases (Smis, Booleans, undefined).
// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
+ // This stub uses VFP3 instructions.
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+
Label false_result;
Label not_heap_number;
Register scratch = r9.is(tos_) ? r7 : r9;
@@ -5957,11 +5960,10 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- ApiFunction *function) {
+ ExternalReference function) {
__ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
RelocInfo::CODE_TARGET));
- __ mov(r2,
- Operand(ExternalReference(function, ExternalReference::DIRECT_CALL)));
+ __ mov(r2, Operand(function));
// Push return address (accessible to GC through exit frame pc).
__ str(pc, MemOperand(sp, 0));
__ Jump(r2); // Call the api function.
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 475fbd70e..0e707f41c 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -592,7 +592,7 @@ class DirectCEntryStub: public CodeStub {
public:
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
- void GenerateCall(MacroAssembler* masm, ApiFunction *function);
+ void GenerateCall(MacroAssembler* masm, ExternalReference function);
void GenerateCall(MacroAssembler* masm, Register target);
private:
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index fea9a8cfb..9f521fb31 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -574,13 +574,38 @@ void FullCodeGenerator::TestContext::Plug(bool flag) const {
void FullCodeGenerator::DoTest(Label* if_true,
Label* if_false,
Label* fall_through) {
- // Call the runtime to find the boolean value of the source and then
- // translate it into control flow to the pair of labels.
- __ push(result_register());
- __ CallRuntime(Runtime::kToBool, 1);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- Split(eq, if_true, if_false, fall_through);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Emit the inlined tests assumed by the stub.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(result_register(), ip);
+ __ b(eq, if_false);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(result_register(), ip);
+ __ b(eq, if_true);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(result_register(), ip);
+ __ b(eq, if_false);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(result_register(), result_register());
+ __ b(eq, if_false);
+ __ JumpIfSmi(result_register(), if_true);
+
+ // Call the ToBoolean stub for all other cases.
+ ToBooleanStub stub(result_register());
+ __ CallStub(&stub);
+ __ tst(result_register(), result_register());
+ } else {
+ // Call the runtime to find the boolean value of the source and then
+ // translate it into control flow to the pair of labels.
+ __ push(result_register());
+ __ CallRuntime(Runtime::kToBool, 1);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r0, ip);
+ }
+
+ // The stub returns nonzero for true.
+ Split(ne, if_true, if_false, fall_through);
}
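The inlined sequence above covers the values ToBoolean sees most often; everything else still falls through to ToBooleanStub. A minimal JavaScript sketch of the truthiness table it implements (print() assumes the d8 shell built from this tree):

    function toBool(x) { return x ? "if_true" : "if_false"; }
    print(toBool(undefined)); // if_false -- first inline check
    print(toBool(true));      // if_true
    print(toBool(false));     // if_false
    print(toBool(0));         // if_false -- smi zero fails the tst
    print(toBool(42));        // if_true  -- any other smi
    print(toBool(""));        // if_false -- left to ToBooleanStub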
@@ -796,9 +821,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
+
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
-
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
@@ -887,8 +912,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
__ b(eq, &exit);
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r0, ip);
+ Register null_value = r5;
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ cmp(r0, null_value);
__ b(eq, &exit);
// Convert the object to a JS object.
@@ -902,12 +928,62 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&done_convert);
__ push(r0);
- // BUG(867): Check cache validity in generated code. This is a fast
- // case for the JSObject::IsSimpleEnum cache validity checks. If we
- // cannot guarantee cache validity, call the runtime system to check
- // cache validity or get the property names in a fixed array.
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ Label next, call_runtime;
+ // Preload a couple of values used in the loop.
+ Register empty_fixed_array_value = r6;
+ __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Register empty_descriptor_array_value = r7;
+ __ LoadRoot(empty_descriptor_array_value,
+ Heap::kEmptyDescriptorArrayRootIndex);
+ __ mov(r1, r0);
+ __ bind(&next);
+
+ // Check that there are no elements. Register r1 contains the
+ // current JS object we've reached through the prototype chain.
+ __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ cmp(r2, empty_fixed_array_value);
+ __ b(ne, &call_runtime);
+
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in r2 for the subsequent
+ // prototype load.
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOffset));
+ __ cmp(r3, empty_descriptor_array_value);
+ __ b(eq, &call_runtime);
+
+ // Check that there is an enum cache in the non-empty instance
+ // descriptors (r3). This is the case if the next enumeration
+ // index field does not contain a smi.
+ __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
+ __ JumpIfSmi(r3, &call_runtime);
+
+ // For all objects but the receiver, check that the cache is empty.
+ Label check_prototype;
+ __ cmp(r1, r0);
+ __ b(eq, &check_prototype);
+ __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ cmp(r3, empty_fixed_array_value);
+ __ b(ne, &call_runtime);
+
+ // Load the prototype from the map and loop if non-null.
+ __ bind(&check_prototype);
+ __ ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
+ __ cmp(r1, null_value);
+ __ b(ne, &next);
+
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ Label use_cache;
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ b(&use_cache);
// Get the set of properties to enumerate.
+ __ bind(&call_runtime);
__ push(r0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
@@ -922,6 +998,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ b(ne, &fixed_array);
// We got a map in register r0. Get the enumeration cache from it.
+ __ bind(&use_cache);
__ ldr(r1, FieldMemOperand(r0, Map::kInstanceDescriptorsOffset));
__ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
__ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
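The generated checks above gate a fast path: an object with no elements, a populated enum cache in its instance descriptors, and empty caches along its prototype chain can be enumerated straight from its map. A rough d8 sketch of the two paths (which path actually runs depends on heap state and is an assumption here):

    // Likely fast-path candidate: named properties only.
    var o = { a: 1, b: 2, c: 3 };
    for (var k in o) print(k);   // a, b, c -- may use the enum cache

    // Has elements, so the generated code branches to call_runtime
    // (Runtime::kGetPropertyNamesFast).
    var p = { a: 1 };
    p[0] = "el";
    for (var k in p) print(k);   // 0, a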
@@ -1010,8 +1087,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- if (scope()->is_function_scope() &&
+ // space for nested functions that don't need literals cloning. If
+ // we're running with the --always-opt or the --prepare-always-opt
+ // flag, we need to use the runtime function so that the new function
+ // we are creating here gets a chance to have its code optimized and
+ // doesn't just get a copy of the existing unoptimized code.
+ if (!FLAG_always_opt &&
+ !FLAG_prepare_always_opt &&
+ scope()->is_function_scope() &&
info->num_literals() == 0 &&
!pretenure) {
FastNewClosureStub stub;
@@ -1265,18 +1348,19 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
Label materialized;
// Registers will be used as follows:
+ // r5 = materialized value (RegExp literal)
// r4 = JS function, literals array
// r3 = literal index
// r2 = RegExp pattern
// r1 = RegExp flags
- // r0 = temp + materialized value (RegExp literal)
+ // r0 = RegExp literal clone
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
int literal_offset =
FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ ldr(r0, FieldMemOperand(r4, literal_offset));
+ __ ldr(r5, FieldMemOperand(r4, literal_offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
+ __ cmp(r5, ip);
__ b(ne, &materialized);
// Create regexp literal using runtime function.
@@ -1286,20 +1370,27 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ mov(r1, Operand(expr->flags()));
__ Push(r4, r3, r2, r1);
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ mov(r5, r0);
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- __ push(r0);
+ Label allocated, runtime_allocate;
+ __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ push(r5);
__ mov(r0, Operand(Smi::FromInt(size)));
__ push(r0);
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ pop(r5);
+ __ bind(&allocated);
// After this, registers are used as follows:
// r0: Newly allocated regexp.
- // r1: Materialized regexp.
+ // r5: Materialized regexp.
// r2: temp.
- __ pop(r1);
- __ CopyFields(r0, r1, r2.bit(), size / kPointerSize);
+ __ CopyFields(r0, r5, r2.bit(), size / kPointerSize);
context()->Plug(r0);
}
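Behaviorally, the rewritten code materializes the literal once (kept in r5) and allocates a fresh clone (r0) on every evaluation, matching the ES5 rule that each evaluation of a regexp literal produces a new object. A small sketch (d8 assumed):

    function make() { return /ab/g; }
    var r1 = make(), r2 = make();
    print(r1 === r2);               // false -- a fresh clone each time
    print(r1.source === r2.source); // true  -- same materialized pattern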
@@ -3134,37 +3225,40 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the runtime.
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::SIN);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sin, 1);
+ __ CallStub(&stub);
context()->Plug(r0);
}
void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the runtime.
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::COS);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_cos, 1);
+ __ CallStub(&stub);
context()->Plug(r0);
}
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the runtime function.
+void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::LOG);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
+ __ CallStub(&stub);
context()->Plug(r0);
}
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the runtime function.
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_log, 1);
+ __ CallRuntime(Runtime::kMath_sqrt, 1);
context()->Plug(r0);
}
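With this change Math.sin, Math.cos, and Math.log on ARM go through TranscendentalCacheStub, which memoizes results keyed on the argument's bit pattern, while Math.sqrt stays on the runtime path. The cache is invisible to scripts except as speed; a sketch (d8 assumed):

    var x = 1.2345;
    var first = Math.sin(x);  // computes, fills a cache line
    var again = Math.sin(x);  // same bits -- may be served from the cache
    print(first === again);   // true either way: the cache is exact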
@@ -3323,8 +3417,14 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(r0);
+ }
+
__ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
__ IndexFromHash(r0, r0);
+
context()->Plug(r0);
}
@@ -3486,9 +3586,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::SUB,
- overwrite,
- NO_UNARY_FLAGS);
+ GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
// GenericUnaryOpStub expects the argument to be in the
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
@@ -3998,11 +4096,43 @@ Register FullCodeGenerator::context_register() {
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
ASSERT(mode == RelocInfo::CODE_TARGET ||
mode == RelocInfo::CODE_TARGET_CONTEXT);
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ __ IncrementCounter(&Counters::named_load_full, 1, r1, r2);
+ break;
+ case Code::KEYED_LOAD_IC:
+ __ IncrementCounter(&Counters::keyed_load_full, 1, r1, r2);
+ break;
+ case Code::STORE_IC:
+ __ IncrementCounter(&Counters::named_store_full, 1, r1, r2);
+ break;
+ case Code::KEYED_STORE_IC:
+ __ IncrementCounter(&Counters::keyed_store_full, 1, r1, r2);
+ default:
+ break;
+ }
+
__ Call(ic, mode);
}
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ __ IncrementCounter(&Counters::named_load_full, 1, r1, r2);
+ break;
+ case Code::KEYED_LOAD_IC:
+ __ IncrementCounter(&Counters::keyed_load_full, 1, r1, r2);
+ break;
+ case Code::STORE_IC:
+ __ IncrementCounter(&Counters::named_store_full, 1, r1, r2);
+ break;
+ case Code::KEYED_STORE_IC:
+ __ IncrementCounter(&Counters::keyed_store_full, 1, r1, r2);
+ default:
+ break;
+ }
+
__ Call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index ca6444227..d375617ad 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -3371,30 +3371,35 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->TempAt(0));
- VFPRoundingMode rounding_mode = instr->truncating() ? kRoundToMinusInf
- : kRoundToNearest;
-
- EmitVFPTruncate(rounding_mode,
+ EmitVFPTruncate(kRoundToZero,
single_scratch,
double_input,
scratch1,
scratch2);
// Deoptimize if we had a vfp invalid exception.
DeoptimizeIf(ne, instr->environment());
+
// Retrieve the result.
__ vmov(result_reg, single_scratch);
- if (instr->truncating() &&
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- __ cmp(result_reg, Operand(0));
- __ b(ne, &done);
- // Check for -0.
- __ vmov(scratch1, double_input.high());
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ if (!instr->truncating()) {
+ // Convert result back to double and compare with input
+ // to check if the conversion was exact.
+ __ vmov(single_scratch, result_reg);
+ __ vcvt_f64_s32(double_scratch0(), single_scratch);
+ __ VFPCompareAndSetFlags(double_scratch0(), double_input);
DeoptimizeIf(ne, instr->environment());
- __ bind(&done);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ cmp(result_reg, Operand(0));
+ __ b(ne, &done);
+ // Check for -0.
+ __ vmov(scratch1, double_input.high());
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment());
+ __ bind(&done);
+ }
}
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 65c92f9e1..e0f291644 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -1618,7 +1618,7 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
- ApiFunction* function, int stack_space) {
+ ExternalReference function, int stack_space) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address();
const int kNextOffset = 0;
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 83c59a6f6..3e13c783d 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -690,7 +690,7 @@ class MacroAssembler: public Assembler {
// from handle and propagates exceptions. Restores context.
// stack_space - space to be unwound on exit (includes the call js
// arguments space and the additional space allocated for the fast call).
- MaybeObject* TryCallApiFunctionAndReturn(ApiFunction* function,
+ MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
int stack_space);
// Jump to a runtime routine.
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 8104747f1..20d51c6af 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1531,7 +1531,11 @@ typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeApiCall)(int32_t arg0);
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+
+// This signature supports direct call to accessor getter callback.
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
+ int32_t arg1);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
@@ -1572,14 +1576,12 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
CHECK(stack_aligned);
double result = target(arg0, arg1, arg2, arg3);
SetFpResult(result);
- } else if (redirection->type() == ExternalReference::DIRECT_CALL) {
- SimulatorRuntimeApiCall target =
- reinterpret_cast<SimulatorRuntimeApiCall>(external);
+ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- PrintF(
- "Call to host function at %p args %08x",
- FUNCTION_ADDR(target),
- arg0);
+ PrintF("Call to host function at %p args %08x",
+ FUNCTION_ADDR(target), arg0);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
@@ -1591,6 +1593,23 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
}
set_register(r0, (int32_t) *result);
+ } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF("Call to host function at %p args %08x %08x",
+ FUNCTION_ADDR(target), arg0, arg1);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08x\n", get_register(sp));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ v8::Handle<v8::Value> result = target(arg0, arg1);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
+ }
+ set_register(r0, (int32_t) *result);
} else {
// builtin call.
ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index e623ea191..e2501125a 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -655,12 +655,10 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
// already generated). Do not allow the assembler to perform a
// garbage collection but instead return the allocation failure
// object.
- MaybeObject* result = masm->TryCallApiFunctionAndReturn(
- &fun, argc + kFastApiCallArguments + 1);
- if (result->IsFailure()) {
- return result;
- }
- return Heap::undefined_value();
+ const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+ ExternalReference ref =
+ ExternalReference(&fun, ExternalReference::DIRECT_API_CALL);
+ return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
class CallInterceptorCompiler BASE_EMBEDDED {
@@ -1245,18 +1243,38 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
name, miss);
- // Push the arguments on the JS stack of the caller.
- __ push(receiver); // Receiver.
- __ mov(scratch3, Operand(Handle<AccessorInfo>(callback))); // callback data
- __ ldr(ip, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
- __ Push(reg, ip, scratch3, name_reg);
+ // Build AccessorInfo::args_ list on the stack and push property name below
+ // the exit frame to make GC aware of them and store pointers to them.
+ __ push(receiver);
+ __ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
+ Handle<AccessorInfo> callback_handle(callback);
+ if (Heap::InNewSpace(callback_handle->data())) {
+ __ Move(scratch3, callback_handle);
+ __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
+ } else {
+ __ Move(scratch3, Handle<Object>(callback_handle->data()));
+ }
+ __ Push(reg, scratch3, name_reg);
+ __ mov(r0, sp); // r0 = Handle<String>
+
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
- // Do tail-call to the runtime system.
- ExternalReference load_callback_property =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallExternalReference(load_callback_property, 5, 1);
- return Heap::undefined_value(); // Success.
+ const int kApiStackSpace = 1;
+ __ EnterExitFrame(false, kApiStackSpace);
+ // Create AccessorInfo instance on the stack above the exit frame with
+ // scratch2 (internal::Object **args_) as the data.
+ __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
+ __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
+ // Emitting a stub call may try to allocate (if the code is not
+ // already generated). Do not allow the assembler to perform a
+ // garbage collection but instead return the allocation failure
+ // object.
+ const int kStackUnwindSpace = 4;
+ ExternalReference ref =
+ ExternalReference(&fun, ExternalReference::DIRECT_GETTER_CALL);
+ return masm()->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index ef82674d7..0753f1e2a 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -418,7 +418,6 @@ function ArrayPush() {
function ArrayConcat(arg1) { // length == 1
- // TODO: can we just use arguments?
var arg_count = %_ArgumentsLength();
var arrays = new $Array(1 + arg_count);
arrays[0] = this;
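ArrayConcat sizes its working list from %_ArgumentsLength() and gathers the receiver plus every argument; array arguments contribute their elements, anything else is appended as a single value. For example (d8 assumed):

    print([1, 2].concat([3, 4], 5)); // 1,2,3,4,5
    print([].concat("ab", ["cd"]));  // ab,cd -- strings are not spread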
@@ -1018,13 +1017,13 @@ function ArrayIndexOf(element, index) {
}
var min = index;
var max = length;
- if (UseSparseVariant(this, length, true)) {
+ if (UseSparseVariant(this, length, IS_ARRAY(this))) {
var intervals = %GetArrayKeys(this, length);
if (intervals.length == 2 && intervals[0] < 0) {
// A single interval.
var intervalMin = -(intervals[0] + 1);
var intervalMax = intervalMin + intervals[1];
- min = MAX(min, intervalMin);
+ if (min < intervalMin) min = intervalMin;
max = intervalMax; // Capped by length already.
// Fall through to loop below.
} else {
@@ -1074,13 +1073,13 @@ function ArrayLastIndexOf(element, index) {
}
var min = 0;
var max = index;
- if (UseSparseVariant(this, length, true)) {
+ if (UseSparseVariant(this, length, IS_ARRAY(this))) {
var intervals = %GetArrayKeys(this, index + 1);
if (intervals.length == 2 && intervals[0] < 0) {
// A single interval.
var intervalMin = -(intervals[0] + 1);
var intervalMax = intervalMin + intervals[1];
- min = MAX(min, intervalMin);
+ if (min < intervalMin) min = intervalMin;
max = intervalMax; // Capped by index already.
// Fall through to loop below.
} else {
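The fix passes IS_ARRAY(this) instead of a hard-coded true, so only genuine JSArrays take the %GetArrayKeys interval walk; array-likes reached through generic calls stay on the plain loop. A sketch of the case class tracked in http://crbug.com/73940 (the exact regression inputs may differ; d8 assumed):

    // Generic call on an array-like: must still find index 5.
    var arraylike = { length: 10000, 5: "x" };
    print(Array.prototype.indexOf.call(arraylike, "x"));     // 5
    print(Array.prototype.lastIndexOf.call(arraylike, "x")); // 5

    // Real sparse arrays keep the interval fast path.
    var sparse = [];
    sparse[5000] = "y";
    print(sparse.indexOf("y"));                              // 5000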
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index a323ecaa4..030d15c51 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -252,7 +252,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteExtraTaggedPC(pc_delta, kPCJumpTag);
WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag);
last_data_ = rinfo->data();
- ASSERT(begin_pos - pos_ == RelocInfo::kRelocCommentSize);
+ ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
} else {
// For all other modes we simply use the mode as the extra tag.
// None of these modes need a data component.
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 095859840..9e6aa087a 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -184,10 +184,10 @@ class RelocInfo BASE_EMBEDDED {
// we do not normally record relocation info.
static const char* kFillerCommentString;
- // The size of a comment is equal to tree bytes for the extra tagged pc +
- // the tag for the data, and kPointerSize for the actual pointer to the
+ // The minimum size of a comment is equal to three bytes for the extra tagged
+ // pc + the tag for the data, and kPointerSize for the actual pointer to the
// comment.
- static const int kRelocCommentSize = 3 + kPointerSize;
+ static const int kMinRelocCommentSize = 3 + kPointerSize;
// The maximum size for a call instruction including pc-jump.
static const int kMaxCallSize = 6;
@@ -481,21 +481,22 @@ class Debug_Address;
class ExternalReference BASE_EMBEDDED {
public:
// Used in the simulator to support different native api calls.
- //
- // BUILTIN_CALL - builtin call.
- // MaybeObject* f(v8::internal::Arguments).
- //
- // FP_RETURN_CALL - builtin call that returns floating point.
- // double f(double, double).
- //
- // DIRECT_CALL - direct call to API function native callback
- // from generated code.
- // Handle<Value> f(v8::Arguments&)
- //
enum Type {
+ // Builtin call.
+ // MaybeObject* f(v8::internal::Arguments).
BUILTIN_CALL, // default
+
+ // Builtin call that returns floating point.
+ // double f(double, double).
FP_RETURN_CALL,
- DIRECT_CALL
+
+ // Direct call to API function callback.
+ // Handle<Value> f(v8::Arguments&)
+ DIRECT_API_CALL,
+
+ // Direct call to accessor getter callback.
+ // Handle<Value> f(Local<String> property, AccessorInfo& info)
+ DIRECT_GETTER_CALL
};
typedef void* ExternalReferenceRedirector(void* original, Type type);
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 96f63c530..cf13def19 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -135,11 +135,8 @@ DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(aggressive_loop_invariant_motion, true,
"aggressive motion of instructions out of loops")
-#ifdef V8_TARGET_ARCH_X64
-DEFINE_bool(use_osr, false, "use on-stack replacement")
-#else
DEFINE_bool(use_osr, true, "use on-stack replacement")
-#endif
+
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
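Removing the V8_TARGET_ARCH_X64 special case turns on-stack replacement on for x64 as well. OSR targets loops that stay hot within a single activation; a toy case to observe it (run as `d8 --trace-osr hot.js`, using the trace flag defined above):

    function hot() {
      var sum = 0;
      for (var i = 0; i < 1e7; i++) sum += i;  // candidate for OSR
      return sum;
    }
    print(hot());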
diff --git a/deps/v8/src/frame-element.h b/deps/v8/src/frame-element.h
index 3b91b9d34..ae5d6a1bf 100644
--- a/deps/v8/src/frame-element.h
+++ b/deps/v8/src/frame-element.h
@@ -113,6 +113,10 @@ class FrameElement BASE_EMBEDDED {
static ZoneObjectList* ConstantList();
+ static bool ConstantPoolOverflowed() {
+ return !DataField::is_valid(ConstantList()->length());
+ }
+
// Clear the constants indirection table.
static void ClearConstantList() {
ConstantList()->Clear();
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index b31351256..1811023ff 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -36,14 +36,14 @@
namespace v8 {
namespace internal {
-template<class T>
+template<typename T>
Handle<T>::Handle(T* obj) {
ASSERT(!obj->IsFailure());
location_ = HandleScope::CreateHandle(obj);
}
-template <class T>
+template <typename T>
inline T* Handle<T>::operator*() const {
ASSERT(location_ != NULL);
ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue);
@@ -51,6 +51,16 @@ inline T* Handle<T>::operator*() const {
}
+template <typename T>
+HandleCell<T>::HandleCell(T* value)
+ : location_(HandleScope::CreateHandle(value)) { }
+
+
+template <typename T>
+HandleCell<T>::HandleCell(Handle<T> value)
+ : location_(HandleScope::CreateHandle(*value)) { }
+
+
#ifdef DEBUG
inline NoHandleAllocation::NoHandleAllocation() {
v8::ImplementationUtilities::HandleScopeData* current =
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index b48aa507e..8b2f95b52 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -863,10 +863,12 @@ bool CompileLazyInLoop(Handle<JSFunction> function,
}
-bool CompileOptimized(Handle<JSFunction> function, int osr_ast_id) {
+bool CompileOptimized(Handle<JSFunction> function,
+ int osr_ast_id,
+ ClearExceptionFlag flag) {
CompilationInfo info(function);
info.SetOptimizing(osr_ast_id);
- return CompileLazyHelper(&info, KEEP_EXCEPTION);
+ return CompileLazyHelper(&info, flag);
}
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index d95ca9117..8f1664bb4 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -39,7 +39,7 @@ namespace internal {
// Handles are only valid within a HandleScope.
// When a handle is created for an object a cell is allocated in the heap.
-template<class T>
+template<typename T>
class Handle {
public:
INLINE(explicit Handle(T** location)) { location_ = location; }
@@ -93,6 +93,55 @@ class Handle {
};
+// A handle-scope based variable. The value stored in the variable can change
+// over time. The value stored in the variable at any time is a root
+// for garbage collection.
+// The variable is backed by the current HandleScope.
+template <typename T>
+class HandleCell {
+ public:
+ // Create a new HandleCell holding the given value.
+ explicit HandleCell(Handle<T> value);
+ explicit HandleCell(T* value);
+
+ // Create an alias of an existing HandleCell.
+ explicit HandleCell(const HandleCell<T>& value)
+ : location_(value.location_) { }
+
+ INLINE(T* operator->() const) { return operator*(); }
+ INLINE(T* operator*() const) {
+ return *location_;
+ }
+ INLINE(void operator=(T* value)) {
+ *location_ = value;
+ }
+ INLINE(void operator=(Handle<T> value)) {
+ *location_ = *value;
+ }
+ INLINE(void operator=(const HandleCell<T>& value)) {
+ *location_ = *value.location_;
+ }
+
+ // Extract the value of the variable and cast it to a given type.
+ // This is typically used for calling methods on a more specialized type.
+ template <typename S>
+ inline S* cast() {
+ S::cast(*location_);
+ return *reinterpret_cast<S**>(location_);
+ }
+
+ Handle<T> ToHandle() const {
+ return Handle<T>(*location_);
+ }
+
+ private:
+ // Prevent implicit constructor from being created.
+ HandleCell();
+
+ T** location_;
+};
+
+
// A stack-allocated class that governs a number of local handles.
// After a handle scope has been created, all local handles will be
// allocated within that handle scope until either the handle scope is
@@ -354,7 +403,9 @@ bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);
-bool CompileOptimized(Handle<JSFunction> function, int osr_ast_id);
+bool CompileOptimized(Handle<JSFunction> function,
+ int osr_ast_id,
+ ClearExceptionFlag flag);
class NoHandleAllocation BASE_EMBEDDED {
public:
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 9e40a50c7..2de70ffa5 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -482,128 +482,84 @@ HConstant* HGraph::GetConstantFalse() {
}
-void HSubgraph::AppendJoin(HBasicBlock* first,
- HBasicBlock* second,
- int join_id) {
+HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
+ HBasicBlock* second,
+ int join_id) {
if (first == NULL) {
- exit_block_ = second;
+ return second;
} else if (second == NULL) {
- exit_block_ = first;
+ return first;
} else {
HBasicBlock* join_block = graph_->CreateBasicBlock();
first->Goto(join_block);
second->Goto(join_block);
join_block->SetJoinId(join_id);
- exit_block_ = join_block;
+ return join_block;
}
}
-void HSubgraph::ResolveContinue(IterationStatement* statement,
- HBasicBlock* continue_block) {
+HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement,
+ HBasicBlock* exit_block,
+ HBasicBlock* continue_block) {
if (continue_block != NULL) {
continue_block->SetJoinId(statement->ContinueId());
}
- exit_block_ =
- JoinBlocks(exit_block(), continue_block, statement->ContinueId());
+ return CreateJoin(exit_block, continue_block, statement->ContinueId());
}
-HBasicBlock* HSubgraph::JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id) {
- if (a == NULL) return b;
- if (b == NULL) return a;
- HBasicBlock* target = graph_->CreateBasicBlock();
- a->Goto(target);
- b->Goto(target);
- target->SetJoinId(id);
- return target;
-}
-
-
-void HSubgraph::AppendEndless(IterationStatement* statement,
- HBasicBlock* body_entry,
- HBasicBlock* body_exit,
- HBasicBlock* break_block) {
- if (exit_block() != NULL) {
- exit_block()->Goto(body_entry, false);
- }
- if (body_exit != NULL) {
- body_exit->Goto(body_entry, true);
- }
+HBasicBlock* HGraphBuilder::CreateEndless(IterationStatement* statement,
+ HBasicBlock* body_entry,
+ HBasicBlock* body_exit,
+ HBasicBlock* break_block) {
+ if (body_exit != NULL) body_exit->Goto(body_entry, true);
if (break_block != NULL) break_block->SetJoinId(statement->ExitId());
- exit_block_ = break_block;
body_entry->PostProcessLoopHeader(statement);
+ return break_block;
}
-void HSubgraph::AppendDoWhile(IterationStatement* statement,
- HBasicBlock* body_entry,
- HBasicBlock* go_back,
- HBasicBlock* exit_block,
- HBasicBlock* break_block) {
- if (this->exit_block() != NULL) {
- this->exit_block()->Goto(body_entry, false);
- }
- if (go_back != NULL) {
- go_back->Goto(body_entry, true);
- }
+HBasicBlock* HGraphBuilder::CreateDoWhile(IterationStatement* statement,
+ HBasicBlock* body_entry,
+ HBasicBlock* go_back,
+ HBasicBlock* exit_block,
+ HBasicBlock* break_block) {
+ if (go_back != NULL) go_back->Goto(body_entry, true);
if (break_block != NULL) break_block->SetJoinId(statement->ExitId());
- exit_block_ =
- JoinBlocks(exit_block, break_block, statement->ExitId());
+ HBasicBlock* new_exit =
+ CreateJoin(exit_block, break_block, statement->ExitId());
body_entry->PostProcessLoopHeader(statement);
+ return new_exit;
}
-void HSubgraph::AppendWhile(IterationStatement* statement,
- HBasicBlock* condition_entry,
- HBasicBlock* exit_block,
- HBasicBlock* body_exit,
- HBasicBlock* break_block,
- HBasicBlock* loop_entry,
- HBasicBlock* loop_exit) {
- if (this->exit_block() != NULL) {
- this->exit_block()->Goto(condition_entry, false);
- }
-
+HBasicBlock* HGraphBuilder::CreateWhile(IterationStatement* statement,
+ HBasicBlock* condition_entry,
+ HBasicBlock* exit_block,
+ HBasicBlock* body_exit,
+ HBasicBlock* break_block,
+ HBasicBlock* loop_entry,
+ HBasicBlock* loop_exit) {
if (break_block != NULL) break_block->SetJoinId(statement->ExitId());
- exit_block_ =
- JoinBlocks(exit_block, break_block, statement->ExitId());
+ HBasicBlock* new_exit =
+ CreateJoin(exit_block, break_block, statement->ExitId());
if (loop_entry != NULL) {
- if (body_exit != NULL) {
- body_exit->Goto(loop_entry, true);
- }
+ if (body_exit != NULL) body_exit->Goto(loop_entry, true);
loop_entry->SetJoinId(statement->EntryId());
- exit_block_ = JoinBlocks(exit_block_, loop_exit, statement->ExitId());
+ new_exit = CreateJoin(new_exit, loop_exit, statement->ExitId());
} else {
- if (body_exit != NULL) {
- body_exit->Goto(condition_entry, true);
- }
+ if (body_exit != NULL) body_exit->Goto(condition_entry, true);
}
condition_entry->PostProcessLoopHeader(statement);
+ return new_exit;
}
-void HSubgraph::Append(BreakableStatement* stmt,
- HBasicBlock* entry_block,
- HBasicBlock* exit_block,
- HBasicBlock* break_block) {
- exit_block_->Goto(entry_block);
- exit_block_ = exit_block;
-
- if (stmt != NULL) {
- entry_block->SetJoinId(stmt->EntryId());
- if (break_block != NULL) break_block->SetJoinId(stmt->EntryId());
- exit_block_ = JoinBlocks(exit_block, break_block, stmt->ExitId());
- }
-}
-
-
-void HSubgraph::FinishExit(HControlInstruction* instruction) {
- ASSERT(exit_block() != NULL);
- exit_block_->Finish(instruction);
- exit_block_->ClearEnvironment();
- exit_block_ = NULL;
+void HBasicBlock::FinishExit(HControlInstruction* instruction) {
+ Finish(instruction);
+ ClearEnvironment();
}
@@ -2165,16 +2121,16 @@ HGraph* HGraphBuilder::CreateGraph(CompilationInfo* info) {
ZoneList<Statement*>* stmts = info->function()->body();
HSubgraph* body = CreateGotoSubgraph(environment());
+ current_block()->Goto(body->entry_block());
AddToSubgraph(body, stmts);
if (HasStackOverflow()) return NULL;
- current_subgraph_->Append(NULL,
- body->entry_block(),
- body->exit_block(),
- NULL);
body->entry_block()->SetJoinId(info->function()->id());
+ set_current_block(body->exit_block());
if (graph()->exit_block() != NULL) {
- graph_->FinishExit(new HReturn(graph_->GetConstantUndefined()));
+ HReturn* instr = new HReturn(graph()->GetConstantUndefined());
+ graph()->exit_block()->FinishExit(instr);
+ graph()->set_exit_block(NULL);
}
}
@@ -2375,14 +2331,17 @@ HSubgraph* HGraphBuilder::CreateLoopHeaderSubgraph(HEnvironment* env) {
void HGraphBuilder::VisitBlock(Block* stmt) {
if (stmt->labels() != NULL) {
HSubgraph* block_graph = CreateGotoSubgraph(environment());
+ current_block()->Goto(block_graph->entry_block());
+ block_graph->entry_block()->SetJoinId(stmt->EntryId());
BreakAndContinueInfo break_info(stmt);
{ BreakAndContinueScope push(&break_info, this);
ADD_TO_SUBGRAPH(block_graph, stmt->statements());
}
- subgraph()->Append(stmt,
- block_graph->entry_block(),
- block_graph->exit_block(),
- break_info.break_block());
+ HBasicBlock* break_block = break_info.break_block();
+ if (break_block != NULL) break_block->SetJoinId(stmt->EntryId());
+ set_current_block(CreateJoin(block_graph->exit_block(),
+ break_block,
+ stmt->ExitId()));
} else {
VisitStatements(stmt->statements());
}
@@ -2418,9 +2377,9 @@ void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
else_graph->entry_block()->SetJoinId(stmt->ElseId());
ADD_TO_SUBGRAPH(else_graph, stmt->else_statement());
- current_subgraph_->AppendJoin(then_graph->exit_block(),
- else_graph->exit_block(),
- stmt->id());
+ set_current_block(CreateJoin(then_graph->exit_block(),
+ else_graph->exit_block(),
+ stmt->id()));
}
}
@@ -2476,7 +2435,8 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
// Not an inlined return, so an actual one.
VISIT_FOR_VALUE(stmt->expression());
HValue* result = environment()->Pop();
- subgraph()->FinishExit(new HReturn(result));
+ current_block()->FinishExit(new HReturn(result));
+ set_current_block(NULL);
} else {
// Return from an inlined function, visit the subexpression in the
// expression context of the call.
@@ -2496,7 +2456,7 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
return_value = environment()->Pop();
}
current_block()->AddLeaveInlined(return_value,
- function_return_);
+ function_return_);
set_current_block(NULL);
}
}
@@ -2685,54 +2645,59 @@ bool HGraph::HasOsrEntryAt(IterationStatement* statement) {
}
-void HSubgraph::PreProcessOsrEntry(IterationStatement* statement) {
+void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
if (!graph()->HasOsrEntryAt(statement)) return;
HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
HBasicBlock* osr_entry = graph()->CreateBasicBlock();
HValue* true_value = graph()->GetConstantTrue();
HTest* test = new HTest(true_value, non_osr_entry, osr_entry);
- exit_block()->Finish(test);
+ current_block()->Finish(test);
HBasicBlock* loop_predecessor = graph()->CreateBasicBlock();
non_osr_entry->Goto(loop_predecessor);
+ set_current_block(osr_entry);
int osr_entry_id = statement->OsrEntryId();
// We want the correct environment at the OsrEntry instruction. Build
// it explicitly. The expression stack should be empty.
- int count = osr_entry->last_environment()->length();
- ASSERT(count == (osr_entry->last_environment()->parameter_count() +
- osr_entry->last_environment()->local_count()));
+ int count = environment()->length();
+ ASSERT(count ==
+ (environment()->parameter_count() + environment()->local_count()));
for (int i = 0; i < count; ++i) {
HUnknownOSRValue* unknown = new HUnknownOSRValue;
- osr_entry->AddInstruction(unknown);
- osr_entry->last_environment()->Bind(i, unknown);
+ AddInstruction(unknown);
+ environment()->Bind(i, unknown);
}
- osr_entry->AddSimulate(osr_entry_id);
- osr_entry->AddInstruction(new HOsrEntry(osr_entry_id));
- osr_entry->Goto(loop_predecessor);
+ AddSimulate(osr_entry_id);
+ AddInstruction(new HOsrEntry(osr_entry_id));
+ current_block()->Goto(loop_predecessor);
loop_predecessor->SetJoinId(statement->EntryId());
- set_exit_block(loop_predecessor);
+ set_current_block(loop_predecessor);
}
void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
ASSERT(current_block() != NULL);
- subgraph()->PreProcessOsrEntry(stmt);
+ PreProcessOsrEntry(stmt);
HSubgraph* body_graph = CreateLoopHeaderSubgraph(environment());
+ current_block()->Goto(body_graph->entry_block(), false);
BreakAndContinueInfo break_info(stmt);
{ BreakAndContinueScope push(&break_info, this);
ADD_TO_SUBGRAPH(body_graph, stmt->body());
}
- body_graph->ResolveContinue(stmt, break_info.continue_block());
+ HBasicBlock* body_exit = JoinContinue(stmt,
+ body_graph->exit_block(),
+ break_info.continue_block());
+ body_graph->set_exit_block(body_exit);
if (body_graph->exit_block() == NULL || stmt->cond()->ToBooleanIsTrue()) {
- subgraph()->AppendEndless(stmt,
- body_graph->entry_block(),
- body_graph->exit_block(),
- break_info.break_block());
+ set_current_block(CreateEndless(stmt,
+ body_graph->entry_block(),
+ body_graph->exit_block(),
+ break_info.break_block()));
} else {
HSubgraph* go_back = CreateEmptySubgraph();
HSubgraph* exit = CreateEmptySubgraph();
@@ -2744,18 +2709,18 @@ void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
go_back->entry_block()->SetJoinId(stmt->BackEdgeId());
exit->entry_block()->SetJoinId(stmt->ExitId());
}
- subgraph()->AppendDoWhile(stmt,
- body_graph->entry_block(),
- go_back->exit_block(),
- exit->exit_block(),
- break_info.break_block());
+ set_current_block(CreateDoWhile(stmt,
+ body_graph->entry_block(),
+ go_back->exit_block(),
+ exit->exit_block(),
+ break_info.break_block()));
}
}
void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
ASSERT(current_block() != NULL);
- subgraph()->PreProcessOsrEntry(stmt);
+ PreProcessOsrEntry(stmt);
HSubgraph* cond_graph = NULL;
HSubgraph* body_graph = NULL;
@@ -2764,8 +2729,10 @@ void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
// If the condition is constant true, do not generate a condition subgraph.
if (stmt->cond()->ToBooleanIsTrue()) {
body_graph = CreateLoopHeaderSubgraph(environment());
+ current_block()->Goto(body_graph->entry_block(), false);
} else {
cond_graph = CreateLoopHeaderSubgraph(environment());
+ current_block()->Goto(cond_graph->entry_block(), false);
body_graph = CreateEmptySubgraph();
exit_graph = CreateEmptySubgraph();
{
@@ -2782,29 +2749,32 @@ void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
{ BreakAndContinueScope push(&break_info, this);
ADD_TO_SUBGRAPH(body_graph, stmt->body());
}
- body_graph->ResolveContinue(stmt, break_info.continue_block());
+ HBasicBlock* body_exit = JoinContinue(stmt,
+ body_graph->exit_block(),
+ break_info.continue_block());
+ body_graph->set_exit_block(body_exit);
if (cond_graph != NULL) {
- AppendPeeledWhile(stmt,
- cond_graph->entry_block(),
- exit_graph->exit_block(),
- body_graph->exit_block(),
- break_info.break_block());
+ set_current_block(CreatePeeledWhile(stmt,
+ cond_graph->entry_block(),
+ exit_graph->exit_block(),
+ body_graph->exit_block(),
+ break_info.break_block()));
} else {
// TODO(fschneider): Implement peeling for endless loops as well.
- subgraph()->AppendEndless(stmt,
- body_graph->entry_block(),
- body_graph->exit_block(),
- break_info.break_block());
+ set_current_block(CreateEndless(stmt,
+ body_graph->entry_block(),
+ body_graph->exit_block(),
+ break_info.break_block()));
}
}
-void HGraphBuilder::AppendPeeledWhile(IterationStatement* stmt,
- HBasicBlock* condition_entry,
- HBasicBlock* exit_block,
- HBasicBlock* body_exit,
- HBasicBlock* break_block) {
+HBasicBlock* HGraphBuilder::CreatePeeledWhile(IterationStatement* stmt,
+ HBasicBlock* condition_entry,
+ HBasicBlock* exit_block,
+ HBasicBlock* body_exit,
+ HBasicBlock* break_block) {
HBasicBlock* loop_entry = NULL;
HBasicBlock* loop_exit = NULL;
if (FLAG_use_peeling && body_exit != NULL && stmt != peeled_statement_) {
@@ -2812,18 +2782,19 @@ void HGraphBuilder::AppendPeeledWhile(IterationStatement* stmt,
IterationStatement* outer_peeled_statement = peeled_statement_;
peeled_statement_ = stmt;
HSubgraph* loop = CreateGotoSubgraph(body_exit->last_environment());
- ADD_TO_SUBGRAPH(loop, stmt);
+ AddToSubgraph(loop, stmt);
peeled_statement_ = outer_peeled_statement;
+ if (HasStackOverflow()) return NULL;
loop_entry = loop->entry_block();
loop_exit = loop->exit_block();
}
- subgraph()->AppendWhile(stmt,
- condition_entry,
- exit_block,
- body_exit,
- break_block,
- loop_entry,
- loop_exit);
+ return CreateWhile(stmt,
+ condition_entry,
+ exit_block,
+ body_exit,
+ break_block,
+ loop_entry,
+ loop_exit);
}
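
CreatePeeledWhile duplicates one iteration of the loop body ahead of the loop when FLAG_use_peeling is on; the peeled_statement_ check keeps a loop from being peeled again while its own peeled copy is being built. A standalone sketch of the source-level effect of peeling, not V8 code:

#include <cstdio>

// The peeled first iteration runs once up front; the remaining loop can
// then be compiled against whatever facts that iteration established.
int SumUnpeeled(const int* a, int n) {
  int sum = 0;
  for (int i = 0; i < n; i++) sum += a[i];
  return sum;
}

int SumPeeled(const int* a, int n) {
  int sum = 0;
  if (n > 0) {
    sum += a[0];                              // peeled copy of the body
    for (int i = 1; i < n; i++) sum += a[i];  // steady-state loop
  }
  return sum;
}

int main() {
  const int a[] = {1, 2, 3};
  std::printf("%d %d\n", SumUnpeeled(a, 3), SumPeeled(a, 3));  // 6 6
  return 0;
}
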
@@ -2834,13 +2805,14 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
CHECK_BAILOUT;
}
ASSERT(current_block() != NULL);
- subgraph()->PreProcessOsrEntry(stmt);
+ PreProcessOsrEntry(stmt);
HSubgraph* cond_graph = NULL;
HSubgraph* body_graph = NULL;
HSubgraph* exit_graph = NULL;
if (stmt->cond() != NULL) {
cond_graph = CreateLoopHeaderSubgraph(environment());
+ current_block()->Goto(cond_graph->entry_block(), false);
body_graph = CreateEmptySubgraph();
exit_graph = CreateEmptySubgraph();
{
@@ -2853,6 +2825,7 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
}
} else {
body_graph = CreateLoopHeaderSubgraph(environment());
+ current_block()->Goto(body_graph->entry_block(), false);
}
BreakAndContinueInfo break_info(stmt);
{ BreakAndContinueScope push(&break_info, this);
@@ -2860,30 +2833,31 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
}
HSubgraph* next_graph = NULL;
- body_graph->ResolveContinue(stmt, break_info.continue_block());
+ HBasicBlock* body_exit = JoinContinue(stmt,
+ body_graph->exit_block(),
+ break_info.continue_block());
+ body_graph->set_exit_block(body_exit);
if (stmt->next() != NULL && body_graph->exit_block() != NULL) {
next_graph =
CreateGotoSubgraph(body_graph->exit_block()->last_environment());
- ADD_TO_SUBGRAPH(next_graph, stmt->next());
- body_graph->Append(NULL,
- next_graph->entry_block(),
- next_graph->exit_block(),
- NULL);
+ body_graph->exit_block()->Goto(next_graph->entry_block());
next_graph->entry_block()->SetJoinId(stmt->ContinueId());
+ ADD_TO_SUBGRAPH(next_graph, stmt->next());
+ body_graph->set_exit_block(next_graph->exit_block());
}
if (cond_graph != NULL) {
- AppendPeeledWhile(stmt,
- cond_graph->entry_block(),
- exit_graph->exit_block(),
- body_graph->exit_block(),
- break_info.break_block());
+ set_current_block(CreatePeeledWhile(stmt,
+ cond_graph->entry_block(),
+ exit_graph->exit_block(),
+ body_graph->exit_block(),
+ break_info.break_block()));
} else {
- subgraph()->AppendEndless(stmt,
- body_graph->entry_block(),
- body_graph->exit_block(),
- break_info.break_block());
+ set_current_block(CreateEndless(stmt,
+ body_graph->entry_block(),
+ body_graph->exit_block(),
+ break_info.break_block()));
}
}
@@ -2937,9 +2911,9 @@ void HGraphBuilder::VisitConditional(Conditional* expr) {
else_graph->entry_block()->SetJoinId(expr->ElseId());
ADD_TO_SUBGRAPH(else_graph, expr->else_expression());
- current_subgraph_->AppendJoin(then_graph->exit_block(),
- else_graph->exit_block(),
- expr->id());
+ set_current_block(CreateJoin(then_graph->exit_block(),
+ else_graph->exit_block(),
+ expr->id()));
ast_context()->ReturnValue(Pop());
}
@@ -3317,7 +3291,8 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
HSubgraph* default_graph = CreateBranchSubgraph(environment());
{ SubgraphScope scope(this, default_graph);
if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
- default_graph->FinishExit(new HDeoptimize());
+ default_graph->exit_block()->FinishExit(new HDeoptimize());
+ default_graph->set_exit_block(NULL);
} else {
HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
Push(value);
@@ -3604,7 +3579,8 @@ void HGraphBuilder::VisitThrow(Throw* expr) {
instr->set_position(expr->position());
AddInstruction(instr);
AddSimulate(expr->id());
- current_subgraph_->FinishExit(new HAbnormalExit);
+ current_block()->FinishExit(new HAbnormalExit);
+ set_current_block(NULL);
}
@@ -3652,7 +3628,8 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
HSubgraph* default_graph = CreateBranchSubgraph(environment());
{ SubgraphScope scope(this, default_graph);
if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
- default_graph->FinishExit(new HDeoptimize());
+ default_graph->exit_block()->FinishExit(new HDeoptimize());
+ default_graph->set_exit_block(NULL);
} else {
HInstruction* instr = BuildLoadNamedGeneric(object, expr);
instr->set_position(expr->position());
@@ -4010,7 +3987,8 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
HSubgraph* default_graph = CreateBranchSubgraph(environment());
{ SubgraphScope scope(this, default_graph);
if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
- default_graph->FinishExit(new HDeoptimize());
+ default_graph->exit_block()->FinishExit(new HDeoptimize());
+ default_graph->set_exit_block(NULL);
} else {
HContext* context = new HContext;
AddInstruction(context);
@@ -4091,6 +4069,8 @@ bool HGraphBuilder::TryInline(Call* expr) {
!Scope::Analyze(&inner_info)) {
if (Top::has_pending_exception()) {
SetStackOverflow();
+ // Stop trying to optimize and inline this function.
+ target->shared()->set_optimization_disabled(true);
}
return false;
}
@@ -4730,9 +4710,9 @@ void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
false_graph->exit_block()->last_environment()->Push(
graph_->GetConstantFalse());
- current_subgraph_->AppendJoin(true_graph->exit_block(),
- false_graph->exit_block(),
- expr->id());
+ set_current_block(CreateJoin(true_graph->exit_block(),
+ false_graph->exit_block(),
+ expr->id()));
ast_context()->ReturnValue(Pop());
} else {
ASSERT(ast_context()->IsEffect());
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index e8c0b0630..bd222f490 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -117,6 +117,7 @@ class HBasicBlock: public ZoneObject {
void SetJoinId(int id);
void Finish(HControlInstruction* last);
+ void FinishExit(HControlInstruction* instruction);
void Goto(HBasicBlock* block, bool include_stack_check = false);
int PredecessorIndexOf(HBasicBlock* predecessor) const;
@@ -206,34 +207,6 @@ class HSubgraph: public ZoneObject {
exit_block_ = block;
}
- void PreProcessOsrEntry(IterationStatement* statement);
-
- void AppendJoin(HBasicBlock* first, HBasicBlock* second, int join_id);
- void AppendWhile(IterationStatement* statement,
- HBasicBlock* condition_entry,
- HBasicBlock* exit_block,
- HBasicBlock* body_exit,
- HBasicBlock* break_block,
- HBasicBlock* loop_entry,
- HBasicBlock* loop_exit);
- void AppendDoWhile(IterationStatement* statement,
- HBasicBlock* body_entry,
- HBasicBlock* go_back,
- HBasicBlock* exit_block,
- HBasicBlock* break_block);
- void AppendEndless(IterationStatement* statement,
- HBasicBlock* body_entry,
- HBasicBlock* body_exit,
- HBasicBlock* break_block);
- void Append(BreakableStatement* stmt,
- HBasicBlock* entry_block,
- HBasicBlock* exit_block,
- HBasicBlock* break_block);
- void ResolveContinue(IterationStatement* statement,
- HBasicBlock* continue_block);
- HBasicBlock* JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id);
-
- void FinishExit(HControlInstruction* instruction);
void Initialize(HBasicBlock* block) {
ASSERT(entry_block_ == NULL);
entry_block_ = block;
@@ -698,11 +671,36 @@ class HGraphBuilder: public AstVisitor {
void Bailout(const char* reason);
- void AppendPeeledWhile(IterationStatement* stmt,
- HBasicBlock* condition_entry,
- HBasicBlock* exit_block,
- HBasicBlock* body_exit,
- HBasicBlock* break_block);
+ void PreProcessOsrEntry(IterationStatement* statement);
+
+ HBasicBlock* CreateJoin(HBasicBlock* first,
+ HBasicBlock* second,
+ int join_id);
+ HBasicBlock* CreateWhile(IterationStatement* statement,
+ HBasicBlock* condition_entry,
+ HBasicBlock* exit_block,
+ HBasicBlock* body_exit,
+ HBasicBlock* break_block,
+ HBasicBlock* loop_entry,
+ HBasicBlock* loop_exit);
+ HBasicBlock* CreateDoWhile(IterationStatement* statement,
+ HBasicBlock* body_entry,
+ HBasicBlock* go_back,
+ HBasicBlock* exit_block,
+ HBasicBlock* break_block);
+ HBasicBlock* CreateEndless(IterationStatement* statement,
+ HBasicBlock* body_entry,
+ HBasicBlock* body_exit,
+ HBasicBlock* break_block);
+ HBasicBlock* CreatePeeledWhile(IterationStatement* stmt,
+ HBasicBlock* condition_entry,
+ HBasicBlock* exit_block,
+ HBasicBlock* body_exit,
+ HBasicBlock* break_block);
+ HBasicBlock* JoinContinue(IterationStatement* statement,
+ HBasicBlock* exit_block,
+ HBasicBlock* continue_block);
+
void AddToSubgraph(HSubgraph* graph, ZoneList<Statement*>* stmts);
void AddToSubgraph(HSubgraph* graph, Statement* stmt);
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index ae544dc63..770ec0b72 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -5360,10 +5360,20 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
void CodeGenerator::VisitLiteral(Literal* node) {
Comment cmnt(masm_, "[ Literal");
- if (in_safe_int32_mode()) {
- frame_->PushUntaggedElement(node->handle());
+ if (frame_->ConstantPoolOverflowed()) {
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ if (in_safe_int32_mode()) {
+ temp.set_untagged_int32(true);
+ }
+ __ Set(temp.reg(), Immediate(node->handle()));
+ frame_->Push(&temp);
} else {
- frame_->Push(node->handle());
+ if (in_safe_int32_mode()) {
+ frame_->PushUntaggedElement(node->handle());
+ } else {
+ frame_->Push(node->handle());
+ }
}
}
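
The pattern in this hunk: literals are normally pushed as constant-pool entries on the virtual frame, but once the pool is full the literal is materialized into a freshly allocated register and the register is pushed instead (ConstantPoolOverflowed is defined in virtual-frame-heavy-inl.h below). A standalone sketch of that fallback, with toy types in place of the real frame and allocator:

#include <cstdio>

struct Frame {
  static const int kMaxConstants = 2;  // tiny limit for the demo
  int constants_used = 0;
  bool ConstantPoolOverflowed() const {
    return constants_used >= kMaxConstants;
  }
  void PushConstant(int value) {
    constants_used++;
    std::printf("push constant-pool entry %d\n", value);
  }
  void PushRegister(int value) {
    std::printf("push register holding %d\n", value);
  }
};

void VisitLiteral(Frame* frame, int value) {
  if (frame->ConstantPoolOverflowed()) {
    frame->PushRegister(value);  // materialize instead of adding an entry
  } else {
    frame->PushConstant(value);
  }
}

int main() {
  Frame frame;
  for (int i = 0; i < 4; i++) VisitLiteral(&frame, i);
  return 0;
}
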
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 3cdca4c62..4255347ba 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -548,7 +548,7 @@ void FullCodeGenerator::DoTest(Label* if_true,
__ j(equal, if_true);
__ cmp(result_register(), Factory::false_value());
__ j(equal, if_false);
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
__ test(result_register(), Operand(result_register()));
__ j(zero, if_false);
__ test(result_register(), Immediate(kSmiTagMask));
@@ -655,6 +655,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
ASSERT(variable != NULL); // Must have been resolved.
Slot* slot = variable->AsSlot();
Property* prop = variable->AsProperty();
+
if (slot != NULL) {
switch (slot->type()) {
case Slot::PARAMETER:
@@ -814,7 +815,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
EmitCallIC(ic, &patch_site);
-
__ test(eax, Operand(eax));
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
@@ -895,7 +895,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(edx, Factory::empty_descriptor_array());
__ j(equal, &call_runtime);
- // Check that there in an enum cache in the non-empty instance
+ // Check that there is an enum cache in the non-empty instance
// descriptors (edx). This is the case if the next enumeration
// index field does not contain a smi.
__ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
@@ -2390,7 +2390,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Literal* key = prop->key()->AsLiteral();
if (key != NULL && key->handle()->IsSymbol()) {
// Call to a named property, use call IC.
- VisitForStackValue(prop->obj());
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(prop->obj());
+ }
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property.
@@ -3401,7 +3403,6 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
-
VisitForAccumulatorValue(args->at(0));
if (FLAG_debug_code) {
@@ -3791,6 +3792,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
+
// Notice that the labels are swapped.
context()->PrepareTest(&materialize_true, &materialize_false,
&if_false, &if_true, &fall_through);
@@ -4386,6 +4388,23 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ __ IncrementCounter(&Counters::named_load_full, 1);
+ break;
+ case Code::KEYED_LOAD_IC:
+ __ IncrementCounter(&Counters::keyed_load_full, 1);
+ break;
+ case Code::STORE_IC:
+ __ IncrementCounter(&Counters::named_store_full, 1);
+ break;
+ case Code::KEYED_STORE_IC:
+      __ IncrementCounter(&Counters::keyed_store_full, 1);
+      break;
+    default:
+ break;
+ }
+
__ call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index d61ebdc0f..c7424a586 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -127,7 +127,7 @@ bool LCodeGen::GenerateRelocPadding() {
int reloc_size = masm()->relocation_writer_size();
while (reloc_size < deoptimization_reloc_size.min_size) {
__ RecordComment(RelocInfo::kFillerCommentString, true);
- reloc_size += RelocInfo::kRelocCommentSize;
+ reloc_size += RelocInfo::kMinRelocCommentSize;
}
return !is_aborted();
}
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc
index 1cc91a9fe..515a9fe97 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.cc
+++ b/deps/v8/src/ia32/virtual-frame-ia32.cc
@@ -1306,6 +1306,7 @@ void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) {
void VirtualFrame::PushUntaggedElement(Handle<Object> value) {
+ ASSERT(!ConstantPoolOverflowed());
elements_.Add(FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED));
elements_[element_count() - 1].set_untagged_int32(true);
}
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.h b/deps/v8/src/ia32/virtual-frame-ia32.h
index 729469fdc..93362b427 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.h
+++ b/deps/v8/src/ia32/virtual-frame-ia32.h
@@ -419,6 +419,8 @@ class VirtualFrame: public ZoneObject {
void EmitPush(Immediate immediate,
TypeInfo info = TypeInfo::Unknown());
+ inline bool ConstantPoolOverflowed();
+
// Push an element on the virtual frame.
inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
inline void Push(Handle<Object> value);
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 7482830ae..4f75ade0e 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -804,6 +804,7 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
HandleScope scope;
Handle<Object> result = GetProperty(object, key);
+ RETURN_IF_EMPTY_HANDLE(result);
// Make receiver an object if the callee requires it. Strict mode or builtin
// functions do not wrap the receiver, non-strict functions and objects
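
GetProperty can run arbitrary getters, so it may come back with an empty handle and a pending exception; the added RETURN_IF_EMPTY_HANDLE bails out before the empty handle is dereferenced. A standalone sketch of the guard (the Handle type and the macro body here are stand-ins, not V8's definitions):

#include <cstdio>

struct Handle {
  const char* value;
  bool is_null() const { return value == nullptr; }
};

#define RETURN_IF_EMPTY_HANDLE(h) \
  if ((h).is_null()) return "pending exception propagated"

Handle GetProperty(bool getter_throws) {
  // An empty handle signals that an exception is already pending.
  return Handle{getter_throws ? nullptr : "callee"};
}

const char* LoadFunction(bool getter_throws) {
  Handle result = GetProperty(getter_throws);
  RETURN_IF_EMPTY_HANDLE(result);  // without this, *result would crash
  return result.value;
}

int main() {
  std::printf("%s\n%s\n", LoadFunction(false), LoadFunction(true));
  return 0;
}
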
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 24887a0ef..f955d334d 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -769,6 +769,10 @@ bool Object::HasSpecificClassOf(String* name) {
MaybeObject* Object::GetElement(uint32_t index) {
+ // GetElement can trigger a getter which can cause allocation.
+ // This was not always the case. This ASSERT is here to catch
+ // leftover incorrect uses.
+ ASSERT(Heap::IsAllocationAllowed());
return GetElementWithReceiver(this, index);
}
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 2d100529e..0b1d72a92 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -5476,9 +5476,11 @@ uint32_t JSFunction::SourceHash() {
bool JSFunction::IsInlineable() {
if (IsBuiltin()) return false;
+ SharedFunctionInfo* shared_info = shared();
// Check that the function has a script associated with it.
- if (!shared()->script()->IsScript()) return false;
- Code* code = shared()->code();
+ if (!shared_info->script()->IsScript()) return false;
+ if (shared_info->optimization_disabled()) return false;
+ Code* code = shared_info->code();
if (code->kind() == Code::OPTIMIZED_FUNCTION) return true;
// If we never ran this (unlikely) then lets try to optimize it.
if (code->kind() != Code::FUNCTION) return true;
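
This pairs with the TryInline hunk above: a compilation attempt that overflows the stack now sets optimization_disabled on the target's SharedFunctionInfo, and IsInlineable refuses such functions, so the expensive failure is not retried at every call site. A toy sketch of the memoized refusal:

#include <cstdio>

struct SharedFunctionInfo {
  bool optimization_disabled = false;
};

bool IsInlineable(const SharedFunctionInfo& shared) {
  return !shared.optimization_disabled;
}

bool TryInline(SharedFunctionInfo* target, bool analysis_overflows_stack) {
  if (!IsInlineable(*target)) return false;  // cheap early out on retry
  if (analysis_overflows_stack) {
    target->optimization_disabled = true;    // remember the failure
    return false;
  }
  return true;
}

int main() {
  SharedFunctionInfo shared;
  std::printf("%d\n", TryInline(&shared, true));   // 0, and disables
  std::printf("%d\n", TryInline(&shared, false));  // 0, early out
  return 0;
}
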
@@ -7277,8 +7279,10 @@ MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver,
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS: {
- MaybeObject* value = GetExternalElement(index);
- if (!value->ToObjectUnchecked()->IsUndefined()) return value;
+ MaybeObject* maybe_value = GetExternalElement(index);
+ Object* value;
+ if (!maybe_value->ToObject(&value)) return maybe_value;
+ if (!value->IsUndefined()) return value;
break;
}
case DICTIONARY_ELEMENTS: {
@@ -7374,8 +7378,10 @@ MaybeObject* JSObject::GetElementWithReceiver(Object* receiver,
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS: {
- MaybeObject* value = GetExternalElement(index);
- if (!value->ToObjectUnchecked()->IsUndefined()) return value;
+ MaybeObject* maybe_value = GetExternalElement(index);
+ Object* value;
+ if (!maybe_value->ToObject(&value)) return maybe_value;
+ if (!value->IsUndefined()) return value;
break;
}
case DICTIONARY_ELEMENTS: {
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 5a443efc3..dce2e1567 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -7020,7 +7020,7 @@ static MaybeObject* Runtime_LazyRecompile(Arguments args) {
function->ReplaceCode(function->shared()->code());
return function->code();
}
- if (CompileOptimized(function, AstNode::kNoNumber)) {
+ if (CompileOptimized(function, AstNode::kNoNumber, CLEAR_EXCEPTION)) {
return function->code();
}
if (FLAG_trace_opt) {
@@ -7029,7 +7029,7 @@ static MaybeObject* Runtime_LazyRecompile(Arguments args) {
PrintF(": optimized compilation failed]\n");
}
function->ReplaceCode(function->shared()->code());
- return Failure::Exception();
+ return function->code();
}
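
The change in this hunk makes failed optimizing compilation non-fatal: Runtime_LazyRecompile reinstates and returns the unoptimized code instead of surfacing Failure::Exception(), so execution simply continues un-optimized. A sketch of the new contract, with plain strings standing in for code objects:

#include <cstdio>

const char* LazyRecompile(bool optimizing_compile_succeeded) {
  if (optimizing_compile_succeeded) return "optimized code";
  // Before this patch the failure escaped as an exception; now the
  // caller just keeps running the code it already had.
  return "unoptimized code";
}

int main() {
  std::printf("%s\n%s\n", LazyRecompile(true), LazyRecompile(false));
  return 0;
}
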
@@ -7189,7 +7189,8 @@ static MaybeObject* Runtime_CompileForOnStackReplacement(Arguments args) {
// Try to compile the optimized code. A true return value from
// CompileOptimized means that compilation succeeded, not necessarily
// that optimization succeeded.
- if (CompileOptimized(function, ast_id) && function->IsOptimized()) {
+ if (CompileOptimized(function, ast_id, CLEAR_EXCEPTION) &&
+ function->IsOptimized()) {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
function->code()->deoptimization_data());
if (data->OsrPcOffset()->value() >= 0) {
@@ -7232,6 +7233,9 @@ static MaybeObject* Runtime_CompileForOnStackReplacement(Arguments args) {
ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
return Smi::FromInt(ast_id);
} else {
+ if (function->IsMarkedForLazyRecompilation()) {
+ function->ReplaceCode(function->shared()->code());
+ }
return Smi::FromInt(-1);
}
}
@@ -8028,377 +8032,449 @@ static MaybeObject* Runtime_PushIfAbsent(Arguments args) {
class ArrayConcatVisitor {
public:
ArrayConcatVisitor(Handle<FixedArray> storage,
- uint32_t index_limit,
bool fast_elements) :
- storage_(storage), index_limit_(index_limit),
- index_offset_(0), fast_elements_(fast_elements) { }
+ storage_(storage),
+ index_offset_(0u),
+ fast_elements_(fast_elements) { }
void visit(uint32_t i, Handle<Object> elm) {
- if (i >= index_limit_ - index_offset_) return;
+ if (i >= JSObject::kMaxElementCount - index_offset_) return;
uint32_t index = index_offset_ + i;
if (fast_elements_) {
- ASSERT(index < static_cast<uint32_t>(storage_->length()));
- storage_->set(index, *elm);
-
- } else {
- Handle<NumberDictionary> dict = Handle<NumberDictionary>::cast(storage_);
- Handle<NumberDictionary> result =
- Factory::DictionaryAtNumberPut(dict, index, elm);
- if (!result.is_identical_to(dict))
- storage_ = result;
+ if (index < static_cast<uint32_t>(storage_->length())) {
+ storage_->set(index, *elm);
+ return;
+ }
+ // Our initial estimate of length was foiled, possibly by
+ // getters on the arrays increasing the length of later arrays
+ // during iteration.
+ // This shouldn't happen in anything but pathological cases.
+ SetDictionaryMode(index);
+ // Fall-through to dictionary mode.
}
- }
+ ASSERT(!fast_elements_);
+ Handle<NumberDictionary> dict(storage_.cast<NumberDictionary>());
+ Handle<NumberDictionary> result =
+ Factory::DictionaryAtNumberPut(dict, index, elm);
+ if (!result.is_identical_to(dict)) {
+ storage_ = Handle<FixedArray>::cast(result);
+ }
+  }
void increase_index_offset(uint32_t delta) {
- if (index_limit_ - index_offset_ < delta) {
- index_offset_ = index_limit_;
+ if (JSObject::kMaxElementCount - index_offset_ < delta) {
+ index_offset_ = JSObject::kMaxElementCount;
} else {
index_offset_ += delta;
}
}
- Handle<FixedArray> storage() { return storage_; }
+ Handle<JSArray> ToArray() {
+ Handle<JSArray> array = Factory::NewJSArray(0);
+ Handle<Object> length =
+ Factory::NewNumber(static_cast<double>(index_offset_));
+ Handle<Map> map;
+ if (fast_elements_) {
+ map = Factory::GetFastElementsMap(Handle<Map>(array->map()));
+ } else {
+ map = Factory::GetSlowElementsMap(Handle<Map>(array->map()));
+ }
+ array->set_map(*map);
+ array->set_length(*length);
+ array->set_elements(*storage_);
+ return array;
+ }
private:
- Handle<FixedArray> storage_;
- // Limit on the accepted indices. Elements with indices larger than the
- // limit are ignored by the visitor.
- uint32_t index_limit_;
- // Index after last seen index. Always less than or equal to index_limit_.
+ // Convert storage to dictionary mode.
+ void SetDictionaryMode(uint32_t index) {
+ ASSERT(fast_elements_);
+ Handle<FixedArray> current_storage(storage_.ToHandle());
+ HandleCell<NumberDictionary> slow_storage(
+ Factory::NewNumberDictionary(current_storage->length()));
+ uint32_t current_length = static_cast<uint32_t>(current_storage->length());
+ for (uint32_t i = 0; i < current_length; i++) {
+ HandleScope loop_scope;
+ Handle<Object> element(current_storage->get(i));
+ if (!element->IsTheHole()) {
+ slow_storage =
+ Factory::DictionaryAtNumberPut(slow_storage.ToHandle(), i, element);
+ }
+ }
+ storage_ = slow_storage.cast<FixedArray>();
+ fast_elements_ = false;
+ }
+
+ HandleCell<FixedArray> storage_;
+ // Index after last seen index. Always less than or equal to
+ // JSObject::kMaxElementCount.
uint32_t index_offset_;
- const bool fast_elements_;
+ bool fast_elements_;
};
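
A standalone model of the visitor's storage strategy above: start with a flat, hole-friendly backing store sized from the estimate, and switch to a sparse map the moment an index lands past that estimate (which a getter that grows a later argument can cause). Plain ints stand in for JS values:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <map>
#include <vector>

class ConcatVisitor {
 public:
  explicit ConcatVisitor(std::size_t estimated_length)
      : fast_(estimated_length, 0), fast_elements_(true) {}

  void visit(uint32_t i, int value) {
    uint32_t index = index_offset_ + i;
    if (fast_elements_) {
      if (index < fast_.size()) {
        fast_[index] = value;
        return;
      }
      SetDictionaryMode();  // estimate was too small: go sparse
    }
    slow_[index] = value;
  }

  void increase_index_offset(uint32_t delta) { index_offset_ += delta; }

 private:
  void SetDictionaryMode() {
    for (uint32_t i = 0; i < fast_.size(); i++) slow_[i] = fast_[i];
    fast_elements_ = false;
  }

  std::vector<int> fast_;
  std::map<uint32_t, int> slow_;  // stands in for the NumberDictionary
  uint32_t index_offset_ = 0;
  bool fast_elements_;
};

int main() {
  ConcatVisitor visitor(2);
  visitor.visit(0, 10);
  visitor.visit(1, 20);
  visitor.increase_index_offset(2);
  visitor.visit(5, 60);  // lands past the estimate: dictionary mode
  return 0;
}
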
+static uint32_t EstimateElementCount(Handle<JSArray> array) {
+ uint32_t length = static_cast<uint32_t>(array->length()->Number());
+ int element_count = 0;
+ switch (array->GetElementsKind()) {
+ case JSObject::FAST_ELEMENTS: {
+ // Fast elements can't have lengths that are not representable by
+ // a 32-bit signed integer.
+ ASSERT(static_cast<int32_t>(FixedArray::kMaxLength) >= 0);
+ int fast_length = static_cast<int>(length);
+ Handle<FixedArray> elements(FixedArray::cast(array->elements()));
+ for (int i = 0; i < fast_length; i++) {
+ if (!elements->get(i)->IsTheHole()) element_count++;
+ }
+ break;
+ }
+ case JSObject::DICTIONARY_ELEMENTS: {
+ Handle<NumberDictionary> dictionary(
+ NumberDictionary::cast(array->elements()));
+ int capacity = dictionary->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Handle<Object> key(dictionary->KeyAt(i));
+ if (dictionary->IsKey(*key)) {
+ element_count++;
+ }
+ }
+ break;
+ }
+ default:
+ // External arrays are always dense.
+ return length;
+ }
+ // As an estimate, we assume that the prototype doesn't contain any
+ // inherited elements.
+ return element_count;
+}
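
This estimate feeds the fast-versus-dictionary decision made later in Runtime_ArrayConcat: the flat backing store is chosen only if at least half of the estimated result length would be populated. A worked check:

#include <cstdint>
#include <cstdio>

bool ChooseFastCase(uint32_t estimate_nof_elements,
                    uint32_t estimate_result_length) {
  return estimate_nof_elements * 2 >= estimate_result_length;
}

int main() {
  // A length-1000 array holding only 10 elements: 20 >= 1000 is false,
  // so the sparse (dictionary) backing store is chosen.
  std::printf("%d\n", ChooseFastCase(10, 1000));   // 0
  std::printf("%d\n", ChooseFastCase(600, 1000));  // 1
  return 0;
}
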
+
+
+
template<class ExternalArrayClass, class ElementType>
-static uint32_t IterateExternalArrayElements(Handle<JSObject> receiver,
- bool elements_are_ints,
- bool elements_are_guaranteed_smis,
- uint32_t range,
- ArrayConcatVisitor* visitor) {
+static void IterateExternalArrayElements(Handle<JSObject> receiver,
+ bool elements_are_ints,
+ bool elements_are_guaranteed_smis,
+ ArrayConcatVisitor* visitor) {
Handle<ExternalArrayClass> array(
ExternalArrayClass::cast(receiver->elements()));
- uint32_t len = Min(static_cast<uint32_t>(array->length()), range);
+ uint32_t len = static_cast<uint32_t>(array->length());
- if (visitor != NULL) {
- if (elements_are_ints) {
- if (elements_are_guaranteed_smis) {
- for (uint32_t j = 0; j < len; j++) {
- Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get(j))));
+ ASSERT(visitor != NULL);
+ if (elements_are_ints) {
+ if (elements_are_guaranteed_smis) {
+ for (uint32_t j = 0; j < len; j++) {
+ HandleScope loop_scope;
+ Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get(j))));
+ visitor->visit(j, e);
+ }
+ } else {
+ for (uint32_t j = 0; j < len; j++) {
+ HandleScope loop_scope;
+ int64_t val = static_cast<int64_t>(array->get(j));
+ if (Smi::IsValid(static_cast<intptr_t>(val))) {
+ Handle<Smi> e(Smi::FromInt(static_cast<int>(val)));
+ visitor->visit(j, e);
+ } else {
+ Handle<Object> e =
+ Factory::NewNumber(static_cast<ElementType>(val));
visitor->visit(j, e);
}
- } else {
- for (uint32_t j = 0; j < len; j++) {
- int64_t val = static_cast<int64_t>(array->get(j));
- if (Smi::IsValid(static_cast<intptr_t>(val))) {
- Handle<Smi> e(Smi::FromInt(static_cast<int>(val)));
- visitor->visit(j, e);
- } else {
- Handle<Object> e =
- Factory::NewNumber(static_cast<ElementType>(val));
- visitor->visit(j, e);
+ }
+ }
+ } else {
+ for (uint32_t j = 0; j < len; j++) {
+ HandleScope loop_scope;
+ Handle<Object> e = Factory::NewNumber(array->get(j));
+ visitor->visit(j, e);
+ }
+ }
+}
+
+
+// Used for sorting indices in a List<uint32_t>.
+static int compareUInt32(const uint32_t* ap, const uint32_t* bp) {
+ uint32_t a = *ap;
+ uint32_t b = *bp;
+ return (a == b) ? 0 : (a < b) ? -1 : 1;
+}
+
+
+static void CollectElementIndices(Handle<JSObject> object,
+ uint32_t range,
+ List<uint32_t>* indices) {
+ JSObject::ElementsKind kind = object->GetElementsKind();
+ switch (kind) {
+ case JSObject::FAST_ELEMENTS: {
+ Handle<FixedArray> elements(FixedArray::cast(object->elements()));
+ uint32_t length = static_cast<uint32_t>(elements->length());
+ if (range < length) length = range;
+ for (uint32_t i = 0; i < length; i++) {
+ if (!elements->get(i)->IsTheHole()) {
+ indices->Add(i);
+ }
+ }
+ break;
+ }
+ case JSObject::DICTIONARY_ELEMENTS: {
+ Handle<NumberDictionary> dict(NumberDictionary::cast(object->elements()));
+ uint32_t capacity = dict->Capacity();
+ for (uint32_t j = 0; j < capacity; j++) {
+ HandleScope loop_scope;
+ Handle<Object> k(dict->KeyAt(j));
+ if (dict->IsKey(*k)) {
+ ASSERT(k->IsNumber());
+ uint32_t index = static_cast<uint32_t>(k->Number());
+ if (index < range) {
+ indices->Add(index);
}
}
}
- } else {
- for (uint32_t j = 0; j < len; j++) {
- Handle<Object> e = Factory::NewNumber(array->get(j));
- visitor->visit(j, e);
+ break;
+ }
+ default: {
+ int dense_elements_length;
+ switch (kind) {
+ case JSObject::PIXEL_ELEMENTS: {
+ dense_elements_length =
+ PixelArray::cast(object->elements())->length();
+ break;
+ }
+ case JSObject::EXTERNAL_BYTE_ELEMENTS: {
+ dense_elements_length =
+ ExternalByteArray::cast(object->elements())->length();
+ break;
+ }
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
+ dense_elements_length =
+ ExternalUnsignedByteArray::cast(object->elements())->length();
+ break;
+ }
+ case JSObject::EXTERNAL_SHORT_ELEMENTS: {
+ dense_elements_length =
+ ExternalShortArray::cast(object->elements())->length();
+ break;
+ }
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
+ dense_elements_length =
+ ExternalUnsignedShortArray::cast(object->elements())->length();
+ break;
+ }
+ case JSObject::EXTERNAL_INT_ELEMENTS: {
+ dense_elements_length =
+ ExternalIntArray::cast(object->elements())->length();
+ break;
+ }
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+ dense_elements_length =
+ ExternalUnsignedIntArray::cast(object->elements())->length();
+ break;
+ }
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS: {
+ dense_elements_length =
+ ExternalFloatArray::cast(object->elements())->length();
+ break;
+ }
+ default:
+ UNREACHABLE();
+ dense_elements_length = 0;
+ break;
+ }
+ uint32_t length = static_cast<uint32_t>(dense_elements_length);
+ if (range <= length) {
+ length = range;
+ // We will add all indices, so we might as well clear it first
+ // and avoid duplicates.
+ indices->Clear();
+ }
+ for (uint32_t i = 0; i < length; i++) {
+ indices->Add(i);
}
+ if (length == range) return; // All indices accounted for already.
+ break;
}
}
- return len;
+ Handle<Object> prototype(object->GetPrototype());
+ if (prototype->IsJSObject()) {
+ // The prototype will usually have no inherited element indices,
+ // but we have to check.
+ CollectElementIndices(Handle<JSObject>::cast(prototype), range, indices);
+ }
}
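
Because the walk above descends the prototype chain, the same index can be collected twice (an own element shadowing an inherited one). The dictionary path in IterateElements below therefore sorts the indices with compareUInt32 and visits each one exactly once. A standalone sketch of that sort-and-skip loop:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

void VisitSortedUnique(std::vector<uint32_t> indices) {
  std::sort(indices.begin(), indices.end());
  std::size_t j = 0;
  std::size_t n = indices.size();
  while (j < n) {
    uint32_t index = indices[j];
    std::printf("visit index %u\n", index);
    do {
      j++;
    } while (j < n && indices[j] == index);  // skip duplicates
  }
}

int main() {
  VisitSortedUnique({7, 0, 3, 3, 7, 7});  // visits 0, 3, 7 once each
  return 0;
}
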
+
/**
- * A helper function that visits elements of a JSObject. Only elements
- * whose index between 0 and range (exclusive) are visited.
+ * A helper function that visits elements of a JSArray in numerical
+ * order.
*
- * If the third parameter, visitor, is not NULL, the visitor is called
- * with parameters, 'visitor_index_offset + element index' and the element.
- *
- * It returns the number of visited elements.
+ * The visitor argument is called for each existing element in the array
+ * with the element index and the element's value.
+ * Afterwards it increments the base-index of the visitor by the array
+ * length.
*/
-static uint32_t IterateElements(Handle<JSObject> receiver,
- uint32_t range,
- ArrayConcatVisitor* visitor) {
- uint32_t num_of_elements = 0;
-
+static void IterateElements(Handle<JSArray> receiver,
+ ArrayConcatVisitor* visitor) {
+ uint32_t length = static_cast<uint32_t>(receiver->length()->Number());
switch (receiver->GetElementsKind()) {
case JSObject::FAST_ELEMENTS: {
+ // Run through the elements FixedArray and use HasElement and GetElement
+ // to check the prototype for missing elements.
Handle<FixedArray> elements(FixedArray::cast(receiver->elements()));
- uint32_t len = elements->length();
- if (range < len) {
- len = range;
- }
-
- for (uint32_t j = 0; j < len; j++) {
- Handle<Object> e(elements->get(j));
- if (!e->IsTheHole()) {
- num_of_elements++;
- if (visitor) {
- visitor->visit(j, e);
- }
+ int fast_length = static_cast<int>(length);
+ ASSERT(fast_length <= elements->length());
+ for (int j = 0; j < fast_length; j++) {
+ HandleScope loop_scope;
+ Handle<Object> element_value(elements->get(j));
+ if (!element_value->IsTheHole()) {
+ visitor->visit(j, element_value);
+ } else if (receiver->HasElement(j)) {
+ // Call GetElement on receiver, not its prototype, or getters won't
+ // have the correct receiver.
+ element_value = GetElement(receiver, j);
+ visitor->visit(j, element_value);
}
}
break;
}
+ case JSObject::DICTIONARY_ELEMENTS: {
+ Handle<NumberDictionary> dict(receiver->element_dictionary());
+ List<uint32_t> indices(dict->Capacity() / 2);
+ // Collect all indices in the object and the prototypes less
+ // than length. This might introduce duplicates in the indices list.
+ CollectElementIndices(receiver, length, &indices);
+ indices.Sort(&compareUInt32);
+ int j = 0;
+ int n = indices.length();
+ while (j < n) {
+ HandleScope loop_scope;
+ uint32_t index = indices[j];
+ Handle<Object> element = GetElement(receiver, index);
+ visitor->visit(index, element);
+ // Skip to next different index (i.e., omit duplicates).
+ do {
+ j++;
+ } while (j < n && indices[j] == index);
+ }
+ break;
+ }
case JSObject::PIXEL_ELEMENTS: {
Handle<PixelArray> pixels(PixelArray::cast(receiver->elements()));
- uint32_t len = pixels->length();
- if (range < len) {
- len = range;
- }
-
- for (uint32_t j = 0; j < len; j++) {
- num_of_elements++;
- if (visitor != NULL) {
- Handle<Smi> e(Smi::FromInt(pixels->get(j)));
- visitor->visit(j, e);
- }
+ for (uint32_t j = 0; j < length; j++) {
+ Handle<Smi> e(Smi::FromInt(pixels->get(j)));
+ visitor->visit(j, e);
}
break;
}
case JSObject::EXTERNAL_BYTE_ELEMENTS: {
- num_of_elements =
- IterateExternalArrayElements<ExternalByteArray, int8_t>(
- receiver, true, true, range, visitor);
+ IterateExternalArrayElements<ExternalByteArray, int8_t>(
+ receiver, true, true, visitor);
break;
}
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- num_of_elements =
- IterateExternalArrayElements<ExternalUnsignedByteArray, uint8_t>(
- receiver, true, true, range, visitor);
+ IterateExternalArrayElements<ExternalUnsignedByteArray, uint8_t>(
+ receiver, true, true, visitor);
break;
}
case JSObject::EXTERNAL_SHORT_ELEMENTS: {
- num_of_elements =
- IterateExternalArrayElements<ExternalShortArray, int16_t>(
- receiver, true, true, range, visitor);
+ IterateExternalArrayElements<ExternalShortArray, int16_t>(
+ receiver, true, true, visitor);
break;
}
case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- num_of_elements =
- IterateExternalArrayElements<ExternalUnsignedShortArray, uint16_t>(
- receiver, true, true, range, visitor);
+ IterateExternalArrayElements<ExternalUnsignedShortArray, uint16_t>(
+ receiver, true, true, visitor);
break;
}
case JSObject::EXTERNAL_INT_ELEMENTS: {
- num_of_elements =
- IterateExternalArrayElements<ExternalIntArray, int32_t>(
- receiver, true, false, range, visitor);
+ IterateExternalArrayElements<ExternalIntArray, int32_t>(
+ receiver, true, false, visitor);
break;
}
case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- num_of_elements =
- IterateExternalArrayElements<ExternalUnsignedIntArray, uint32_t>(
- receiver, true, false, range, visitor);
+ IterateExternalArrayElements<ExternalUnsignedIntArray, uint32_t>(
+ receiver, true, false, visitor);
break;
}
case JSObject::EXTERNAL_FLOAT_ELEMENTS: {
- num_of_elements =
- IterateExternalArrayElements<ExternalFloatArray, float>(
- receiver, false, false, range, visitor);
- break;
- }
- case JSObject::DICTIONARY_ELEMENTS: {
- Handle<NumberDictionary> dict(receiver->element_dictionary());
- uint32_t capacity = dict->Capacity();
- for (uint32_t j = 0; j < capacity; j++) {
- Handle<Object> k(dict->KeyAt(j));
- if (dict->IsKey(*k)) {
- ASSERT(k->IsNumber());
- uint32_t index = static_cast<uint32_t>(k->Number());
- if (index < range) {
- num_of_elements++;
- if (visitor) {
- visitor->visit(index, Handle<Object>(dict->ValueAt(j)));
- }
- }
- }
- }
+ IterateExternalArrayElements<ExternalFloatArray, float>(
+ receiver, false, false, visitor);
break;
}
default:
UNREACHABLE();
break;
}
-
- return num_of_elements;
-}
-
-
-/**
- * A helper function that visits elements of an Array object, and elements
- * on its prototypes.
- *
- * Elements on prototypes are visited first, and only elements whose indices
- * less than Array length are visited.
- *
- * If a ArrayConcatVisitor object is given, the visitor is called with
- * parameters, element's index + visitor_index_offset and the element.
- *
- * The returned number of elements is an upper bound on the actual number
- * of elements added. If the same element occurs in more than one object
- * in the array's prototype chain, it will be counted more than once, but
- * will only occur once in the result.
- */
-static uint32_t IterateArrayAndPrototypeElements(Handle<JSArray> array,
- ArrayConcatVisitor* visitor) {
- uint32_t range = static_cast<uint32_t>(array->length()->Number());
- Handle<Object> obj = array;
-
- static const int kEstimatedPrototypes = 3;
- List< Handle<JSObject> > objects(kEstimatedPrototypes);
-
- // Visit prototype first. If an element on the prototype is shadowed by
- // the inheritor using the same index, the ArrayConcatVisitor visits
- // the prototype element before the shadowing element.
- // The visitor can simply overwrite the old value by new value using
- // the same index. This follows Array::concat semantics.
- while (!obj->IsNull()) {
- objects.Add(Handle<JSObject>::cast(obj));
- obj = Handle<Object>(obj->GetPrototype());
- }
-
- uint32_t nof_elements = 0;
- for (int i = objects.length() - 1; i >= 0; i--) {
- Handle<JSObject> obj = objects[i];
- uint32_t encountered_elements =
- IterateElements(Handle<JSObject>::cast(obj), range, visitor);
-
- if (encountered_elements > JSObject::kMaxElementCount - nof_elements) {
- nof_elements = JSObject::kMaxElementCount;
- } else {
- nof_elements += encountered_elements;
- }
- }
-
- return nof_elements;
-}
-
-
-/**
- * A helper function of Runtime_ArrayConcat.
- *
- * The first argument is an Array of arrays and objects. It is the
- * same as the arguments array of Array::concat JS function.
- *
- * If an argument is an Array object, the function visits array
- * elements. If an argument is not an Array object, the function
- * visits the object as if it is a one-element array.
- *
- * If the result array index overflows 32-bit unsigned integer, the rounded
- * non-negative number is used as new length. For example, if one
- * array length is 2^32 - 1, second array length is 1, the
- * concatenated array length is 0.
- * TODO(lrn) Change length behavior to ECMAScript 5 specification (length
- * is one more than the last array index to get a value assigned).
- */
-static uint32_t IterateArguments(Handle<JSArray> arguments,
- ArrayConcatVisitor* visitor) {
- uint32_t visited_elements = 0;
- uint32_t num_of_args = static_cast<uint32_t>(arguments->length()->Number());
-
- for (uint32_t i = 0; i < num_of_args; i++) {
- Object *element;
- MaybeObject* maybe_element = arguments->GetElement(i);
- // This if() is not expected to fail, but we have the check in the
- // interest of hardening the runtime calls.
- if (maybe_element->ToObject(&element)) {
- Handle<Object> obj(element);
- if (obj->IsJSArray()) {
- Handle<JSArray> array = Handle<JSArray>::cast(obj);
- uint32_t len = static_cast<uint32_t>(array->length()->Number());
- uint32_t nof_elements =
- IterateArrayAndPrototypeElements(array, visitor);
- // Total elements of array and its prototype chain can be more than
- // the array length, but ArrayConcat can only concatenate at most
- // the array length number of elements. We use the length as an estimate
- // for the actual number of elements added.
- uint32_t added_elements = (nof_elements > len) ? len : nof_elements;
- if (JSArray::kMaxElementCount - visited_elements < added_elements) {
- visited_elements = JSArray::kMaxElementCount;
- } else {
- visited_elements += added_elements;
- }
- if (visitor) visitor->increase_index_offset(len);
- } else {
- if (visitor) {
- visitor->visit(0, obj);
- visitor->increase_index_offset(1);
- }
- if (visited_elements < JSArray::kMaxElementCount) {
- visited_elements++;
- }
- }
- }
- }
- return visited_elements;
+ visitor->increase_index_offset(length);
}
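
One subtlety of the fast-elements case above: a hole in the backing store is not necessarily an absent element once prototypes are considered, so holes are re-checked with HasElement and fetched through the receiver itself, which keeps a prototype getter's receiver correct. A standalone model, with a map standing in for the prototype chain:

#include <cstdio>
#include <map>
#include <vector>

const int kHole = -1;

void IterateFastElements(const std::vector<int>& own_store,
                         const std::map<int, int>& inherited) {
  for (int j = 0; j < static_cast<int>(own_store.size()); j++) {
    if (own_store[j] != kHole) {
      std::printf("own element %d -> %d\n", j, own_store[j]);
    } else {
      std::map<int, int>::const_iterator it = inherited.find(j);
      if (it != inherited.end()) {
        // The real code calls GetElement on the receiver here so that a
        // getter up the chain still sees the receiver as its `this`.
        std::printf("inherited element %d -> %d\n", j, it->second);
      }
    }
  }
}

int main() {
  IterateFastElements({10, kHole, 30}, {{1, 99}});
  return 0;
}
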
/**
* Array::concat implementation.
* See ECMAScript 262, 15.4.4.4.
- * TODO(lrn): Fix non-compliance for very large concatenations and update to
+ * TODO(581): Fix non-compliance for very large concatenations and update to
* following the ECMAScript 5 specification.
*/
static MaybeObject* Runtime_ArrayConcat(Arguments args) {
ASSERT(args.length() == 1);
HandleScope handle_scope;
- CONVERT_CHECKED(JSArray, arg_arrays, args[0]);
- Handle<JSArray> arguments(arg_arrays);
-
- // Pass 1: estimate the number of elements of the result
- // (it could be more than real numbers if prototype has elements).
- uint32_t result_length = 0;
- uint32_t num_of_args = static_cast<uint32_t>(arguments->length()->Number());
-
- { AssertNoAllocation nogc;
- for (uint32_t i = 0; i < num_of_args; i++) {
- Object* obj;
- MaybeObject* maybe_object = arguments->GetElement(i);
- // This if() is not expected to fail, but we have the check in the
- // interest of hardening the runtime calls.
- if (maybe_object->ToObject(&obj)) {
- uint32_t length_estimate;
- if (obj->IsJSArray()) {
- length_estimate =
- static_cast<uint32_t>(JSArray::cast(obj)->length()->Number());
- } else {
- length_estimate = 1;
- }
- if (JSObject::kMaxElementCount - result_length < length_estimate) {
- result_length = JSObject::kMaxElementCount;
- break;
- }
- result_length += length_estimate;
+ CONVERT_ARG_CHECKED(JSArray, arguments, 0);
+ int argument_count = static_cast<int>(arguments->length()->Number());
+ RUNTIME_ASSERT(arguments->HasFastElements());
+ Handle<FixedArray> elements(FixedArray::cast(arguments->elements()));
+
+ // Pass 1: estimate the length and number of elements of the result.
+ // The actual length can be larger if any of the arguments have getters
+ // that mutate other arguments (but will otherwise be precise).
+ // The number of elements is precise if there are no inherited elements.
+
+ uint32_t estimate_result_length = 0;
+ uint32_t estimate_nof_elements = 0;
+ {
+ for (int i = 0; i < argument_count; i++) {
+ HandleScope loop_scope;
+ Handle<Object> obj(elements->get(i));
+ uint32_t length_estimate;
+ uint32_t element_estimate;
+ if (obj->IsJSArray()) {
+ Handle<JSArray> array(Handle<JSArray>::cast(obj));
+ length_estimate =
+ static_cast<uint32_t>(array->length()->Number());
+ element_estimate =
+ EstimateElementCount(array);
+ } else {
+ length_estimate = 1;
+ element_estimate = 1;
+ }
+ // Avoid overflows by capping at kMaxElementCount.
+ if (JSObject::kMaxElementCount - estimate_result_length <
+ length_estimate) {
+ estimate_result_length = JSObject::kMaxElementCount;
+ } else {
+ estimate_result_length += length_estimate;
+ }
+ if (JSObject::kMaxElementCount - estimate_nof_elements <
+ element_estimate) {
+ estimate_nof_elements = JSObject::kMaxElementCount;
+ } else {
+ estimate_nof_elements += element_estimate;
}
}
}
- // Allocate an empty array, will set map, length, and content later.
- Handle<JSArray> result = Factory::NewJSArray(0);
-
- uint32_t estimate_nof_elements = IterateArguments(arguments, NULL);
// If estimated number of elements is more than half of length, a
// fixed array (fast case) is more time and space-efficient than a
// dictionary.
- bool fast_case = (estimate_nof_elements * 2) >= result_length;
+ bool fast_case = (estimate_nof_elements * 2) >= estimate_result_length;
- Handle<Map> map;
Handle<FixedArray> storage;
if (fast_case) {
// The backing storage array must have non-existing elements to
// preserve holes across concat operations.
- map = Factory::GetFastElementsMap(Handle<Map>(result->map()));
- storage = Factory::NewFixedArrayWithHoles(result_length);
+ storage = Factory::NewFixedArrayWithHoles(estimate_result_length);
} else {
- map = Factory::GetSlowElementsMap(Handle<Map>(result->map()));
// TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
uint32_t at_least_space_for = estimate_nof_elements +
(estimate_nof_elements >> 2);
@@ -8406,21 +8482,20 @@ static MaybeObject* Runtime_ArrayConcat(Arguments args) {
Factory::NewNumberDictionary(at_least_space_for));
}
- Handle<Object> len = Factory::NewNumber(static_cast<double>(result_length));
-
- ArrayConcatVisitor visitor(storage, result_length, fast_case);
-
- IterateArguments(arguments, &visitor);
+ ArrayConcatVisitor visitor(storage, fast_case);
- // Please note:
- // - the storage might have been changed in the visitor;
- // - the map and the storage must be set together to avoid breaking
- // the invariant that the map describes the array's elements.
- result->set_map(*map);
- result->set_length(*len);
- result->set_elements(*visitor.storage());
+ for (int i = 0; i < argument_count; i++) {
+ Handle<Object> obj(elements->get(i));
+ if (obj->IsJSArray()) {
+ Handle<JSArray> array = Handle<JSArray>::cast(obj);
+ IterateElements(array, &visitor);
+ } else {
+ visitor.visit(0, obj);
+ visitor.increase_index_offset(1);
+ }
+ }
- return *result;
+ return *visitor.ToArray();
}
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index 4f2d07b0c..6165255fd 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -2121,6 +2121,12 @@ class MapSpace : public FixedSpace {
accounting_stats_.DeallocateBytes(accounting_stats_.Size());
accounting_stats_.AllocateBytes(new_size);
+ // Flush allocation watermarks.
+ for (Page* p = first_page_; p != top_page; p = p->next_page()) {
+ p->SetAllocationWatermark(p->AllocationTop());
+ }
+ top_page->SetAllocationWatermark(new_top);
+
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
intptr_t actual_size = 0;
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 59824863d..2475fb2f1 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 1
-#define BUILD_NUMBER 6
+#define BUILD_NUMBER 7
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/deps/v8/src/virtual-frame-heavy-inl.h b/deps/v8/src/virtual-frame-heavy-inl.h
index 2755eee64..ac844b44c 100644
--- a/deps/v8/src/virtual-frame-heavy-inl.h
+++ b/deps/v8/src/virtual-frame-heavy-inl.h
@@ -82,7 +82,13 @@ void VirtualFrame::Push(Register reg, TypeInfo info) {
}
+bool VirtualFrame::ConstantPoolOverflowed() {
+ return FrameElement::ConstantPoolOverflowed();
+}
+
+
void VirtualFrame::Push(Handle<Object> value) {
+ ASSERT(!ConstantPoolOverflowed());
FrameElement element =
FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
elements_.Add(element);
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 285c07812..b082624f4 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index ea41a202d..41111a778 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -2995,6 +2995,28 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::orpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x56);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
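
The two new emitters follow the standard SSE2 packed-double encoding already used by xorpd here: a 0x66 operand-size prefix, an optional REX byte for xmm8..xmm15, the 0x0F escape, then the opcode proper (0x54 for andpd, 0x56 for orpd, 0x57 for xorpd). As an illustration only, not part of the patch, the three could share one emitter built from the Assembler helpers visible above:

// Hypothetical factoring; emit(), emit_optional_rex_32() and
// emit_sse_operand() are the existing Assembler helpers used above.
void Assembler::emit_sse2_pd_op(byte opcode, XMMRegister dst,
                                XMMRegister src) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit(0x66);                      // operand-size prefix: PD variant
  emit_optional_rex_32(dst, src);  // REX bits for xmm8..xmm15
  emit(0x0F);                      // two-byte opcode escape
  emit(opcode);                    // 0x54 ANDPD, 0x56 ORPD, 0x57 XORPD
  emit_sse_operand(dst, src);      // ModR/M for the register pair
}
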
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 553fbe422..f6cd57093 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// A lightweight X64 Assembler.
@@ -1284,6 +1284,8 @@ class Assembler : public Malloced {
void mulsd(XMMRegister dst, XMMRegister src);
void divsd(XMMRegister dst, XMMRegister src);
+ void andpd(XMMRegister dst, XMMRegister src);
+ void orpd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index c362f7b79..a2dd6cd42 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 240087e17..6cfeed360 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -1506,40 +1506,59 @@ void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Input on stack:
- // rsp[8]: argument (should be number).
- // rsp[0]: return address.
+ // TAGGED case:
+ // Input:
+ // rsp[8]: argument (should be number).
+ // rsp[0]: return address.
+ // Output:
+ // rax: tagged double result.
+ // UNTAGGED case:
+  //   Input:
+  //     rsp[0]: return address.
+  //     xmm1: untagged double input argument.
+ // Output:
+ // xmm1: untagged double result.
+
Label runtime_call;
Label runtime_call_clear_stack;
- Label input_not_smi;
- NearLabel loaded;
- // Test that rax is a number.
- __ movq(rax, Operand(rsp, kPointerSize));
- __ JumpIfNotSmi(rax, &input_not_smi);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the bits of the double into rbx.
- __ SmiToInteger32(rax, rax);
- __ subq(rsp, Immediate(kPointerSize));
- __ cvtlsi2sd(xmm1, rax);
- __ movsd(Operand(rsp, 0), xmm1);
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ Move(rbx, Factory::heap_number_map());
- __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // bits into rbx.
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rdx, rbx);
- __ bind(&loaded);
- // ST[0] == double value
+ Label skip_cache;
+ const bool tagged = (argument_type_ == TAGGED);
+ if (tagged) {
+ NearLabel input_not_smi;
+ NearLabel loaded;
+ // Test that rax is a number.
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ JumpIfNotSmi(rax, &input_not_smi);
+ // Input is a smi. Untag and load it onto the FPU stack.
+ // Then load the bits of the double into rbx.
+ __ SmiToInteger32(rax, rax);
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ cvtlsi2sd(xmm1, rax);
+ __ movsd(Operand(rsp, 0), xmm1);
+ __ movq(rbx, xmm1);
+ __ movq(rdx, xmm1);
+ __ fld_d(Operand(rsp, 0));
+ __ addq(rsp, Immediate(kDoubleSize));
+ __ jmp(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
+ __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ j(not_equal, &runtime_call);
+ // Input is a HeapNumber. Push it on the FPU stack and load its
+ // bits into rbx.
+ __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rdx, rbx);
+
+ __ bind(&loaded);
+ } else { // UNTAGGED.
+ __ movq(rbx, xmm1);
+ __ movq(rdx, xmm1);
+ }
+
+ // ST[0] == double value, if TAGGED.
// rbx = bits of double value.
// rdx = also bits of double value.
// Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
@@ -1571,7 +1590,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// rax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
__ testq(rax, rax);
- __ j(zero, &runtime_call_clear_stack);
+ __ j(zero, &runtime_call_clear_stack); // Only clears stack if TAGGED.
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
{ // NOLINT - doesn't like a single brace on a line.
@@ -1597,30 +1616,70 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &cache_miss);
// Cache hit!
__ movq(rax, Operand(rcx, 2 * kIntSize));
- __ fstp(0); // Clear FPU stack.
- __ ret(kPointerSize);
+ if (tagged) {
+ __ fstp(0); // Clear FPU stack.
+ __ ret(kPointerSize);
+ } else { // UNTAGGED.
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ Ret();
+ }
__ bind(&cache_miss);
// Update cache with new value.
- Label nan_result;
- GenerateOperation(masm, &nan_result);
+ if (tagged) {
__ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
+ } else { // UNTAGGED.
+ __ AllocateHeapNumber(rax, rdi, &skip_cache);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
+ __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ }
+ GenerateOperation(masm);
__ movq(Operand(rcx, 0), rbx);
__ movq(Operand(rcx, 2 * kIntSize), rax);
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ ret(kPointerSize);
-
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+ if (tagged) {
+ __ ret(kPointerSize);
+ } else { // UNTAGGED.
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ Ret();
+
+ // Skip cache and return answer directly, only in untagged case.
+ __ bind(&skip_cache);
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ movsd(Operand(rsp, 0), xmm1);
+ __ fld_d(Operand(rsp, 0));
+ GenerateOperation(masm);
+ __ fstp_d(Operand(rsp, 0));
+ __ movsd(xmm1, Operand(rsp, 0));
+ __ addq(rsp, Immediate(kDoubleSize));
+ // We return the value in xmm1 without adding it to the cache, but
+ // we cause a scavenging GC so that future allocations will succeed.
+ __ EnterInternalFrame();
+ // Allocate an unused object bigger than a HeapNumber.
+ __ Push(Smi::FromInt(2 * kDoubleSize));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ __ LeaveInternalFrame();
+ __ Ret();
+ }
- __ bind(&nan_result);
- __ fstp(0); // Remove argument from FPU stack.
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- __ movq(Operand(rcx, 0), rbx);
- __ movq(Operand(rcx, 2 * kIntSize), rax);
- __ ret(kPointerSize);
+ // Call runtime, doing whatever allocation and cleanup is necessary.
+ if (tagged) {
+ __ bind(&runtime_call_clear_stack);
+ __ fstp(0);
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+ } else { // UNTAGGED.
+ __ bind(&runtime_call_clear_stack);
+ __ bind(&runtime_call);
+ __ AllocateHeapNumber(rax, rdi, &skip_cache);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
+ __ EnterInternalFrame();
+ __ push(rax);
+ __ CallRuntime(RuntimeFunction(), 1);
+ __ LeaveInternalFrame();
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ Ret();
+ }
}
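
The stub now has two calling conventions: TAGGED takes a boxed number on the stack and returns a HeapNumber in rax, while UNTAGGED passes the raw double in and out through xmm1 so optimized code can call it without allocating. Both share the cache keyed on the double's 64 raw bits (held in rbx/rdx above). A standalone model of that bit-pattern cache, with an illustrative size and hash:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

const int kCacheSize = 512;  // illustrative; must be a power of two

struct Entry {
  bool valid;
  uint64_t bits;
  double result;
};

static Entry cache[kCacheSize] = {};

double CachedSin(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));  // key on the exact bits
  uint32_t hash = static_cast<uint32_t>(bits ^ (bits >> 32));
  hash = (hash ^ (hash >> 16)) & (kCacheSize - 1);
  Entry& entry = cache[hash];
  if (entry.valid && entry.bits == bits) return entry.result;  // hit
  entry.valid = true;
  entry.bits = bits;
  entry.result = std::sin(input);  // miss: compute and remember
  return entry.result;
}

int main() {
  std::printf("%f %f\n", CachedSin(1.0), CachedSin(1.0));
  return 0;
}
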
@@ -1637,9 +1696,9 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
}
-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
- Label* on_nan_result) {
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// Registers:
+ // rax: Newly allocated HeapNumber, which must be preserved.
// rbx: Bits of input double. Must be preserved.
// rcx: Pointer to cache entry. Must be preserved.
// st(0): Input double
@@ -1661,9 +1720,18 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
__ j(below, &in_range);
// Check for infinity and NaN. Both return NaN for sin.
__ cmpl(rdi, Immediate(0x7ff));
- __ j(equal, on_nan_result);
+ NearLabel non_nan_result;
+ __ j(not_equal, &non_nan_result);
+ // Input is +/-Infinity or NaN. Result is NaN.
+ __ fstp(0);
+ __ LoadRoot(kScratchRegister, Heap::kNanValueRootIndex);
+ __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&non_nan_result);
// Use fpmod to restrict argument to the range +/-2*PI.
+ __ movq(rdi, rax); // Save rax before using fnstsw_ax.
__ fldpi();
__ fadd(0);
__ fld(1);
@@ -1696,6 +1764,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
// FPU Stack: input % 2*pi, 2*pi,
__ fstp(0);
// FPU Stack: input % 2*pi
+ __ movq(rax, rdi); // Restore rax, pointer to the new HeapNumber.
__ bind(&in_range);
switch (type_) {
case TranscendentalCache::SIN:
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index 1e6fc6514..32a37b215 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -39,15 +39,23 @@ namespace internal {
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public CodeStub {
public:
- explicit TranscendentalCacheStub(TranscendentalCache::Type type)
- : type_(type) {}
+ enum ArgumentType {
+ TAGGED = 0,
+ UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
+ };
+
+ explicit TranscendentalCacheStub(TranscendentalCache::Type type,
+ ArgumentType argument_type)
+ : type_(type), argument_type_(argument_type) {}
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
+ ArgumentType argument_type_;
+
Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_; }
+ int MinorKey() { return type_ | argument_type_; }
Runtime::FunctionId RuntimeFunction();
- void GenerateOperation(MacroAssembler* masm, Label* on_nan_result);
+ void GenerateOperation(MacroAssembler* masm);
};
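The MinorKey above stays collision-free because UNTAGGED is a flag bit placed immediately above the bits that encode TranscendentalCache::Type, so the two fields occupy disjoint ranges. A minimal sketch of the packing; the constants are stand-ins, and the bit width is an assumption (the real value lives in TranscendentalCache):

    var kTranscendentalTypeBits = 3;
    var Type = { SIN: 0, COS: 1, LOG: 2 };
    var ArgumentType = { TAGGED: 0, UNTAGGED: 1 << kTranscendentalTypeBits };

    function minorKey(type, argumentType) {
      return type | argumentType;  // disjoint bit ranges, so OR is lossless
    }

    // minorKey(Type.LOG, ArgumentType.TAGGED)   == 2
    // minorKey(Type.LOG, ArgumentType.UNTAGGED) == 10, a distinct stub key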
diff --git a/deps/v8/src/x64/codegen-x64-inl.h b/deps/v8/src/x64/codegen-x64-inl.h
index 60e9ab03a..53caf9197 100644
--- a/deps/v8/src/x64/codegen-x64-inl.h
+++ b/deps/v8/src/x64/codegen-x64-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index c07bcf904..dfee36ed7 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -4694,7 +4694,18 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
void CodeGenerator::VisitLiteral(Literal* node) {
Comment cmnt(masm_, "[ Literal");
- frame_->Push(node->handle());
+ if (frame_->ConstantPoolOverflowed()) {
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ if (node->handle()->IsSmi()) {
+ __ Move(temp.reg(), Smi::cast(*node->handle()));
+ } else {
+ __ movq(temp.reg(), node->handle(), RelocInfo::EMBEDDED_OBJECT);
+ }
+ frame_->Push(&temp);
+ } else {
+ frame_->Push(node->handle());
+ }
}
@@ -7030,7 +7041,8 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::SIN);
+ TranscendentalCacheStub stub(TranscendentalCache::SIN,
+ TranscendentalCacheStub::TAGGED);
Result result = frame_->CallStub(&stub, 1);
frame_->Push(&result);
}
@@ -7039,7 +7051,8 @@ void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::COS);
+ TranscendentalCacheStub stub(TranscendentalCache::COS,
+ TranscendentalCacheStub::TAGGED);
Result result = frame_->CallStub(&stub, 1);
frame_->Push(&result);
}
@@ -7048,7 +7061,8 @@ void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::LOG);
+ TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub::TAGGED);
Result result = frame_->CallStub(&stub, 1);
frame_->Push(&result);
}
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index c283db3a0..439282919 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/deps/v8/src/x64/cpu-x64.cc b/deps/v8/src/x64/cpu-x64.cc
index 513c52286..3ff292e82 100644
--- a/deps/v8/src/x64/cpu-x64.cc
+++ b/deps/v8/src/x64/cpu-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 4218647f3..2c50ddd14 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 595dedc47..627814285 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -224,7 +224,7 @@ void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
// ok:
//
ASSERT(*(call_target_address - 3) == 0x73 && // jae
- *(call_target_address - 2) == 0x05 && // offset
+ *(call_target_address - 2) == 0x07 && // offset
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x90; // nop
*(call_target_address - 2) = 0x90; // nop
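The offset byte changes from 0x05 to 0x07 because EmitStackCheck now plants a loop-depth marker right after the stack-check call (see the full-codegen-x64.cc hunk below), so the guarding jae has to skip that instruction as well. A sketch of the sequence, with the encodings stated as assumptions (the two-byte size implies the assembler narrows testl with a byte-sized mask to a test-al instruction):

    //   cmp rsp, <stack limit>
    //   jae ok               ;; rel8 = 0x07: skip the call and the test
    //   call <stack check>   ;; 0xe8 + rel32 = 5 bytes
    //   test al, <depth>     ;; 0xa8 + imm8 = 2 bytes
    //  ok: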
@@ -245,14 +245,154 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
*(call_target_address - 2) == 0x90 && // nop
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x73; // jae
- *(call_target_address - 2) = 0x05; // offset
+ *(call_target_address - 2) = 0x07; // offset
Assembler::set_target_address_at(call_target_address,
check_code->entry());
}
+static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+ ByteArray* translations = data->TranslationByteArray();
+ int length = data->DeoptCount();
+ for (int i = 0; i < length; i++) {
+ if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+ TranslationIterator it(translations, data->TranslationIndex(i)->value());
+ int value = it.Next();
+ ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
+ // Read the number of frames.
+ value = it.Next();
+ if (value == 1) return i;
+ }
+ }
+ UNREACHABLE();
+ return -1;
+}
+
+
void Deoptimizer::DoComputeOsrOutputFrame() {
- UNIMPLEMENTED();
+ DeoptimizationInputData* data = DeoptimizationInputData::cast(
+ optimized_code_->deoptimization_data());
+ unsigned ast_id = data->OsrAstId()->value();
+ // TODO(kasperl): This should not be the bailout_id_. It should be
+ // the ast id. Confusing.
+ ASSERT(bailout_id_ == ast_id);
+
+ int bailout_id = LookupBailoutId(data, ast_id);
+ unsigned translation_index = data->TranslationIndex(bailout_id)->value();
+ ByteArray* translations = data->TranslationByteArray();
+
+ TranslationIterator iterator(translations, translation_index);
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator.Next());
+ ASSERT(Translation::BEGIN == opcode);
+ USE(opcode);
+ int count = iterator.Next();
+ ASSERT(count == 1);
+ USE(count);
+
+ opcode = static_cast<Translation::Opcode>(iterator.Next());
+ USE(opcode);
+ ASSERT(Translation::FRAME == opcode);
+ unsigned node_id = iterator.Next();
+ USE(node_id);
+ ASSERT(node_id == ast_id);
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
+ USE(function);
+ ASSERT(function == function_);
+ unsigned height = iterator.Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ USE(height_in_bytes);
+
+ unsigned fixed_size = ComputeFixedSize(function_);
+ unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
+ ASSERT(fixed_size + height_in_bytes == input_frame_size);
+
+ unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+ unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
+ unsigned outgoing_size = outgoing_height * kPointerSize;
+ unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
+ ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
+
+ if (FLAG_trace_osr) {
+ PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
+ reinterpret_cast<intptr_t>(function_));
+ function_->PrintName();
+ PrintF(" => node=%u, frame=%d->%d]\n",
+ ast_id,
+ input_frame_size,
+ output_frame_size);
+ }
+
+ // There's only one output frame in the OSR case.
+ output_count_ = 1;
+ output_ = new FrameDescription*[1];
+ output_[0] = new(output_frame_size) FrameDescription(
+ output_frame_size, function_);
+
+ // Clear the incoming parameters in the optimized frame to avoid
+ // confusing the garbage collector.
+ unsigned output_offset = output_frame_size - kPointerSize;
+ int parameter_count = function_->shared()->formal_parameter_count() + 1;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_[0]->SetFrameSlot(output_offset, 0);
+ output_offset -= kPointerSize;
+ }
+
+ // Translate the incoming parameters. This may overwrite some of the
+ // incoming argument slots we've just cleared.
+ int input_offset = input_frame_size - kPointerSize;
+ bool ok = true;
+ int limit = input_offset - (parameter_count * kPointerSize);
+ while (ok && input_offset > limit) {
+ ok = DoOsrTranslateCommand(&iterator, &input_offset);
+ }
+
+ // There are no translation commands for the caller's pc and fp, the
+ // context, and the function. Set them up explicitly.
+ for (int i = 0; ok && i < 4; i++) {
+ intptr_t input_value = input_->GetFrameSlot(input_offset);
+ if (FLAG_trace_osr) {
+ PrintF(" [esp + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] (fixed part)\n",
+ output_offset,
+ input_value,
+ input_offset);
+ }
+ output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
+ input_offset -= kPointerSize;
+ output_offset -= kPointerSize;
+ }
+
+ // Translate the rest of the frame.
+ while (ok && input_offset >= 0) {
+ ok = DoOsrTranslateCommand(&iterator, &input_offset);
+ }
+
+ // If translation of any command failed, continue using the input frame.
+ if (!ok) {
+ delete output_[0];
+ output_[0] = input_;
+ output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
+ } else {
+    // Set up the frame pointer and the context pointer.
+ output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
+ output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));
+
+ unsigned pc_offset = data->OsrPcOffset()->value();
+ intptr_t pc = reinterpret_cast<intptr_t>(
+ optimized_code_->entry() + pc_offset);
+ output_[0]->SetPc(pc);
+ }
+ Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
+ output_[0]->SetContinuation(
+ reinterpret_cast<intptr_t>(continuation->entry()));
+
+ if (FLAG_trace_osr) {
+ PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
+ ok ? "finished" : "aborted",
+ reinterpret_cast<intptr_t>(function));
+ function->PrintName();
+ PrintF(" => pc=0x%0" V8PRIxPTR "]\n", output_[0]->GetPc());
+ }
}
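This is the half of on-stack replacement that builds the frame: the unoptimized frame at a loop's stack check is translated into the layout the optimized code expects, and execution resumes at OsrPcOffset inside that code. A JS sketch of code that would exercise the path; whether OSR actually fires depends on runtime heuristics and flags, an assumption here:

    function hot() {
      var sum = 0;
      // The patched stack check in this loop triggers optimization, and
      // OSR swaps the running frame over to the optimized code mid-loop.
      for (var i = 0; i < 100000000; i++) sum += i;
      return sum;
    }
    hot();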
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index f73f94845..21a100f59 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -1040,14 +1040,18 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer(", %s", NameOfXMMRegister(regop));
} else {
const char* mnemonic = "?";
- if (opcode == 0x57) {
+ if (opcode == 0x50) {
+ mnemonic = "movmskpd";
+ } else if (opcode == 0x54) {
+ mnemonic = "andpd";
+ } else if (opcode == 0x56) {
+ mnemonic = "orpd";
+ } else if (opcode == 0x57) {
mnemonic = "xorpd";
} else if (opcode == 0x2E) {
mnemonic = "ucomisd";
} else if (opcode == 0x2F) {
mnemonic = "comisd";
- } else if (opcode == 0x50) {
- mnemonic = "movmskpd";
} else {
UnimplementedInstruction();
}
diff --git a/deps/v8/src/x64/frames-x64.cc b/deps/v8/src/x64/frames-x64.cc
index 9c960478a..6c58bc9e0 100644
--- a/deps/v8/src/x64/frames-x64.cc
+++ b/deps/v8/src/x64/frames-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index 998b3e9fc..81be81919 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 8711f4238..b8d7e5019 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -207,43 +207,45 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
Move(dot_arguments_slot, rcx, rbx, rdx);
}
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- EmitDeclaration(scope()->function(), Variable::CONST, NULL);
- }
- // Visit all the explicit declarations unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- scope()->VisitIllegalRedeclaration(this);
- } else {
- VisitDeclarations(scope()->declarations());
- }
- }
-
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailout(info->function(), NO_REGISTERS);
- NearLabel ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
+ } else {
+ { Comment cmnt(masm_, "[ Declarations");
+ // For named function expressions, declare the function name as a
+ // constant.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
+ { Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailout(info->function(), NO_REGISTERS);
+ NearLabel ok;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ bind(&ok);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(function()->body());
+ ASSERT(loop_depth() == 0);
+ }
}
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
{ Comment cmnt(masm_, "[ return <undefined>;");
- // Emit a 'return undefined' in case control fell off the end of the body.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
EmitReturnSequence();
}
@@ -267,6 +269,13 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
// the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());
+ // Loop stack checks can be patched to perform on-stack replacement. In
+ // order to decide whether or not to perform OSR we embed the loop depth
+ // in a test instruction after the call so we can extract it from the OSR
+ // builtin.
+ ASSERT(loop_depth() > 0);
+ __ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
+
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
@@ -543,7 +552,7 @@ void FullCodeGenerator::DoTest(Label* if_true,
__ j(equal, if_true);
__ CompareRoot(result_register(), Heap::kFalseValueRootIndex);
__ j(equal, if_false);
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
__ SmiCompare(result_register(), Smi::FromInt(0));
__ j(equal, if_false);
Condition is_smi = masm_->CheckSmi(result_register());
@@ -851,7 +860,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
VisitForAccumulatorValue(stmt->enumerable());
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, &exit);
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ Register null_value = rdi;
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ cmpq(rax, null_value);
__ j(equal, &exit);
// Convert the object to a JS object.
@@ -865,12 +876,61 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&done_convert);
__ push(rax);
- // BUG(867): Check cache validity in generated code. This is a fast
- // case for the JSObject::IsSimpleEnum cache validity checks. If we
- // cannot guarantee cache validity, call the runtime system to check
- // cache validity or get the property names in a fixed array.
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ Label next, call_runtime;
+ Register empty_fixed_array_value = r8;
+ __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Register empty_descriptor_array_value = r9;
+ __ LoadRoot(empty_descriptor_array_value,
+ Heap::kEmptyDescriptorArrayRootIndex);
+ __ movq(rcx, rax);
+ __ bind(&next);
+
+ // Check that there are no elements. Register rcx contains the
+ // current JS object we've reached through the prototype chain.
+ __ cmpq(empty_fixed_array_value,
+ FieldOperand(rcx, JSObject::kElementsOffset));
+ __ j(not_equal, &call_runtime);
+
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in rbx for the subsequent
+ // prototype load.
+ __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
+ __ cmpq(rdx, empty_descriptor_array_value);
+ __ j(equal, &call_runtime);
+
+ // Check that there is an enum cache in the non-empty instance
+ // descriptors (rdx). This is the case if the next enumeration
+ // index field does not contain a smi.
+ __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
+ __ JumpIfSmi(rdx, &call_runtime);
+
+ // For all objects but the receiver, check that the cache is empty.
+ NearLabel check_prototype;
+ __ cmpq(rcx, rax);
+ __ j(equal, &check_prototype);
+ __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ cmpq(rdx, empty_fixed_array_value);
+ __ j(not_equal, &call_runtime);
+
+ // Load the prototype from the map and loop if non-null.
+ __ bind(&check_prototype);
+ __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+ __ cmpq(rcx, null_value);
+ __ j(not_equal, &next);
+
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ NearLabel use_cache;
+ __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ jmp(&use_cache);
// Get the set of properties to enumerate.
+ __ bind(&call_runtime);
__ push(rax); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
@@ -883,6 +943,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(not_equal, &fixed_array);
// We got a map in register rax. Get the enumeration cache from it.
+ __ bind(&use_cache);
__ movq(rcx, FieldOperand(rax, Map::kInstanceDescriptorsOffset));
__ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
__ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
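These checks are the generated-code mirror of JSObject::IsSimpleEnum: the descriptor enum cache is used only when every object on the prototype chain has no elements and a populated enum cache, and every object but the receiver also has an empty cache bridge. A JS-level sketch of what stays on the fast path; which shapes qualify is an assumption about this era's heuristics:

    var fast = { a: 1, b: 2 };   // named properties only: enum cache usable
    for (var k in fast) {}       // repeated for-in reuses the cached names

    var slow = { a: 1 };
    slow[0] = "x";               // elements present: the element check fails,
    for (var k in slow) {}       // so we call Runtime::kGetPropertyNamesFast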
@@ -971,8 +1032,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- if (scope()->is_function_scope() &&
+ // space for nested functions that don't need literals cloning. If
+ // we're running with the --always-opt or the --prepare-always-opt
+ // flag, we need to use the runtime function so that the new function
+ // we are creating here gets a chance to have its code optimized and
+ // doesn't just get a copy of the existing unoptimized code.
+ if (!FLAG_always_opt &&
+ !FLAG_prepare_always_opt &&
+ scope()->is_function_scope() &&
info->num_literals() == 0 &&
!pretenure) {
FastNewClosureStub stub;
@@ -1082,8 +1149,11 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
// Check that last extension is NULL.
__ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
- __ movq(temp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(temp, slot->index());
+
+ // This function is used only for loads, not stores, so it's safe to
+ // return an rsi-based operand (the write barrier cannot be allowed to
+ // destroy the rsi register).
+ return ContextOperand(context, slot->index());
}
@@ -1730,57 +1800,75 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
- // Perform the assignment for non-const variables and for initialization
- // of const variables. Const assignments are simply skipped.
- Label done;
+ } else if (op == Token::INIT_CONST) {
+ // Like var declarations, const declarations are hoisted to function
+ // scope. However, unlike var initializers, const initializers are able
+ // to drill a hole to that function context, even from inside a 'with'
+ // context. We thus bypass the normal static scope lookup.
+ Slot* slot = var->AsSlot();
+ Label skip;
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ // No const parameters.
+ UNREACHABLE();
+ break;
+ case Slot::LOCAL:
+ __ movq(rdx, Operand(rbp, SlotOffset(slot)));
+ __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &skip);
+ __ movq(Operand(rbp, SlotOffset(slot)), rax);
+ break;
+ case Slot::CONTEXT: {
+ __ movq(rcx, ContextOperand(rsi, Context::FCONTEXT_INDEX));
+ __ movq(rdx, ContextOperand(rcx, slot->index()));
+ __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &skip);
+ __ movq(ContextOperand(rcx, slot->index()), rax);
+ int offset = Context::SlotOffset(slot->index());
+        __ movq(rdx, rax);  // Preserve the stored value in rax.
+ __ RecordWrite(rcx, offset, rdx, rbx);
+ break;
+ }
+ case Slot::LOOKUP:
+ __ push(rax);
+ __ push(rsi);
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ break;
+ }
+ __ bind(&skip);
+
+ } else if (var->mode() != Variable::CONST) {
+ // Perform the assignment for non-const variables. Const assignments
+ // are simply skipped.
Slot* slot = var->AsSlot();
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
- if (op == Token::INIT_CONST) {
- // Detect const reinitialization by checking for the hole value.
- __ movq(rdx, Operand(rbp, SlotOffset(slot)));
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &done);
- }
// Perform the assignment.
__ movq(Operand(rbp, SlotOffset(slot)), rax);
break;
case Slot::CONTEXT: {
MemOperand target = EmitSlotSearch(slot, rcx);
- if (op == Token::INIT_CONST) {
- // Detect const reinitialization by checking for the hole value.
- __ movq(rdx, target);
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &done);
- }
// Perform the assignment and issue the write barrier.
__ movq(target, rax);
// The value of the assignment is in rax. RecordWrite clobbers its
// register arguments.
__ movq(rdx, rax);
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ int offset = Context::SlotOffset(slot->index());
__ RecordWrite(rcx, offset, rdx, rbx);
break;
}
case Slot::LOOKUP:
- // Call the runtime for the assignment. The runtime will ignore
- // const reinitialization.
+ // Call the runtime for the assignment.
__ push(rax); // Value.
__ push(rsi); // Context.
__ Push(var->name());
- if (op == Token::INIT_CONST) {
- // The runtime will ignore const redeclaration.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
- }
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
break;
}
- __ bind(&done);
}
}
@@ -2805,7 +2893,8 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN);
+ TranscendentalCacheStub stub(TranscendentalCache::SIN,
+ TranscendentalCacheStub::TAGGED);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -2815,7 +2904,8 @@ void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS);
+ TranscendentalCacheStub stub(TranscendentalCache::COS,
+ TranscendentalCacheStub::TAGGED);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -2825,7 +2915,8 @@ void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG);
+ TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub::TAGGED);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -3000,9 +3091,12 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
-
VisitForAccumulatorValue(args->at(0));
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(rax);
+ }
+
__ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
ASSERT(String::kHashShift >= kSmiTagSize);
__ IndexFromHash(rax, rax);
@@ -3715,6 +3809,22 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ __ IncrementCounter(&Counters::named_load_full, 1);
+ break;
+ case Code::KEYED_LOAD_IC:
+ __ IncrementCounter(&Counters::keyed_load_full, 1);
+ break;
+ case Code::STORE_IC:
+ __ IncrementCounter(&Counters::named_store_full, 1);
+ break;
+ case Code::KEYED_STORE_IC:
+      __ IncrementCounter(&Counters::keyed_store_full, 1);
+      break;
+    default:
+ break;
+ }
+
__ call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
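For context on the INIT_CONST path earlier in this file: in the pre-harmony const that V8 implements here, const declarations are hoisted and their slot starts out holding the hole; the initializer stores only while the hole is still present, and ordinary assignments to a const are silently skipped rather than thrown. A JS sketch of the observable sloppy-mode behavior, as implemented in this era:

    function f() {
      const c = 1;  // initializes only while the slot still holds the hole
      c = 2;        // a plain assignment to a const is skipped, not an error
      return c;     // 1
    }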
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index f8c40ab4e..55d837c6b 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/deps/v8/src/x64/jump-target-x64.cc b/deps/v8/src/x64/jump-target-x64.cc
index 1208b0dbe..e71560463 100644
--- a/deps/v8/src/x64/jump-target-x64.cc
+++ b/deps/v8/src/x64/jump-target-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index e6904b4da..90244f1c2 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -711,7 +711,8 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
break;
}
case CodeStub::TranscendentalCache: {
- TranscendentalCacheStub stub(instr->transcendental_type());
+ TranscendentalCacheStub stub(instr->transcendental_type(),
+ TranscendentalCacheStub::TAGGED);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
@@ -1579,7 +1580,20 @@ static Condition BranchCondition(HHasInstanceType* instr) {
void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
- Abort("Unimplemented: %s", "DoHasInstanceType");
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+ __ testl(input, Immediate(kSmiTagMask));
+ NearLabel done, is_false;
+ __ j(zero, &is_false);
+ __ CmpObjectType(input, TestType(instr->hydrogen()), result);
+ __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&is_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
@@ -2271,12 +2285,105 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoDeferredMathAbsTaggedHeapNumber");
+ Register input_reg = ToRegister(instr->InputAt(0));
+ __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(not_equal, instr->environment());
+
+ Label done;
+ Register tmp = input_reg.is(rax) ? rcx : rax;
+ Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
+
+ // Preserve the value of all registers.
+ __ PushSafepointRegisters();
+
+ Label negative;
+ __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ // Check the sign of the argument. If the argument is positive, just
+ // return it. We do not need to patch the stack since |input| and
+ // |result| are the same register and |input| will be restored
+ // unchanged by popping safepoint registers.
+ __ testl(tmp, Immediate(HeapNumber::kSignMask));
+ __ j(not_zero, &negative);
+ __ jmp(&done);
+
+ __ bind(&negative);
+
+ Label allocated, slow;
+ __ AllocateHeapNumber(tmp, tmp2, &slow);
+ __ jmp(&allocated);
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ // Set the pointer to the new heap number in tmp.
+ if (!tmp.is(rax)) {
+ __ movq(tmp, rax);
+ }
+
+ // Restore input_reg after call to runtime.
+ __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
+
+ __ bind(&allocated);
+ __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
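+  // The two shifts below clear the sign bit, leaving the raw bits of |x|.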
+ __ shl(tmp2, Immediate(1));
+ __ shr(tmp2, Immediate(1));
+ __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
+ __ StoreToSafepointRegisterSlot(input_reg, tmp);
+
+ __ bind(&done);
+ __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+ Register input_reg = ToRegister(instr->InputAt(0));
+ __ testl(input_reg, input_reg);
+ Label is_positive;
+ __ j(not_sign, &is_positive);
+ __ negl(input_reg); // Sets flags.
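+  // Only kMinInt is still negative after negl (overflow), so deoptimize then.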
+ DeoptimizeIf(negative, instr->environment());
+ __ bind(&is_positive);
}
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathAbs");
+ // Class for deferred case.
+ class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ public:
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+ LUnaryMathOperation* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+ }
+ private:
+ LUnaryMathOperation* instr_;
+ };
+
+ ASSERT(instr->InputAt(0)->Equals(instr->result()));
+ Representation r = instr->hydrogen()->value()->representation();
+
+ if (r.IsDouble()) {
+ XMMRegister scratch = xmm0;
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
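+    // Build -x (0.0 - x) in scratch; x and -x differ only in the sign bit,
+    // so the bitwise andpd leaves |x| in input_reg.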
+ __ xorpd(scratch, scratch);
+ __ subsd(scratch, input_reg);
+ __ andpd(input_reg, scratch);
+ } else if (r.IsInteger32()) {
+ EmitIntegerMathAbs(instr);
+ } else { // Tagged case.
+ DeferredMathAbsTaggedHeapNumber* deferred =
+ new DeferredMathAbsTaggedHeapNumber(this, instr);
+ Register input_reg = ToRegister(instr->InputAt(0));
+ // Smi check.
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ EmitIntegerMathAbs(instr);
+ __ bind(deferred->exit());
+ }
}
@@ -2360,17 +2467,26 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathLog");
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathCos");
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::COS,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathSin");
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::SIN,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -2414,6 +2530,7 @@ void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
int arity = instr->arity();
Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 52409f207..1cac4e9df 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -186,6 +186,7 @@ class LCodeGen BASE_EMBEDDED {
XMMRegister ToDoubleRegister(int index) const;
// Specific math operations - used from DoUnaryMathOperation.
+ void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 2ed109d13..8db1ba906 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -1502,8 +1502,10 @@ LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
- Abort("Unimplemented: %s", "DoHasInstanceType");
- return NULL;
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LHasInstanceType(value));
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 48e42c85c..8845bbb77 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -136,7 +136,7 @@ void MacroAssembler::RecordWrite(Register object,
Register value) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
- // registers are esi.
+ // registers are rsi.
ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
// First, check if a write barrier is even needed. The tests below
@@ -1503,6 +1503,11 @@ void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
}
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ movq(dst, SafepointRegisterSlot(src));
+}
+
+
Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}
@@ -2531,9 +2536,21 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
// The context may be an intermediate context, not a function context.
movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- } else { // context is the current function context.
- // The context may be an intermediate context, not a function context.
- movq(dst, Operand(rsi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ } else {
+ // Slot is in the current function context. Move it into the
+ // destination register in case we store into it (the write barrier
+ // cannot be allowed to destroy the context in rsi).
+ movq(dst, rsi);
+ }
+
+ // We should not have found a 'with' context by walking the context chain
+ // (i.e., the static scope chain and runtime context chain do not agree).
+ // A variable occurring in such a scope should have slot type LOOKUP and
+ // not CONTEXT.
+ if (FLAG_debug_code) {
+ cmpq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ Check(equal, "Yo dawg, I heard you liked function contexts "
+ "so I put function contexts in all your contexts");
}
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 4c5c60c8f..4cf59c4e8 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -174,7 +174,7 @@ class MacroAssembler: public Assembler {
// Store the value in register src in the safepoint register stack
// slot for register dst.
void StoreToSafepointRegisterSlot(Register dst, Register src);
-
+ void LoadFromSafepointRegisterSlot(Register dst, Register src);
// ---------------------------------------------------------------------------
// JavaScript invokes
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 27f3482a9..cd3bfbd42 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/x64/regexp-macro-assembler-x64.h
index 182bc5529..421a22944 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/deps/v8/src/x64/simulator-x64.h b/deps/v8/src/x64/simulator-x64.h
index 3a62ffd5c..aa2994f26 100644
--- a/deps/v8/src/x64/simulator-x64.h
+++ b/deps/v8/src/x64/simulator-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index c27e1b8c4..774de7184 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/deps/v8/src/x64/virtual-frame-x64.cc b/deps/v8/src/x64/virtual-frame-x64.cc
index 31f9527a6..ea115f28a 100644
--- a/deps/v8/src/x64/virtual-frame-x64.cc
+++ b/deps/v8/src/x64/virtual-frame-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/deps/v8/src/x64/virtual-frame-x64.h b/deps/v8/src/x64/virtual-frame-x64.h
index 4a9c72034..824743d1d 100644
--- a/deps/v8/src/x64/virtual-frame-x64.h
+++ b/deps/v8/src/x64/virtual-frame-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -400,6 +400,8 @@ class VirtualFrame : public ZoneObject {
// Uses kScratchRegister, emits appropriate relocation info.
void EmitPush(Handle<Object> value);
+ inline bool ConstantPoolOverflowed();
+
// Push an element on the virtual frame.
inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
inline void Push(Handle<Object> value);
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 0da3f1cdc..b3c52f1f1 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -7627,10 +7627,11 @@ static void GenerateSomeGarbage() {
"garbage = undefined;");
}
+
v8::Handle<v8::Value> DirectApiCallback(const v8::Arguments& args) {
static int count = 0;
if (count++ % 3 == 0) {
- v8::V8::LowMemoryNotification(); // This should move the stub
+ i::Heap::CollectAllGarbage(true); // This should move the stub
GenerateSomeGarbage(); // This should ensure the old stub memory is flushed
}
return v8::Handle<v8::Value>();
@@ -7682,6 +7683,54 @@ THREADED_TEST(CallICFastApi_DirectCall_Throw) {
}
+v8::Handle<v8::Value> DirectGetterCallback(Local<String> name,
+ const v8::AccessorInfo& info) {
+ if (++p_getter_count % 3 == 0) {
+ i::Heap::CollectAllGarbage(true);
+ GenerateSomeGarbage();
+ }
+ return v8::Handle<v8::Value>();
+}
+
+
+THREADED_TEST(LoadICFastApi_DirectCall_GCMoveStub) {
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::Handle<v8::ObjectTemplate> obj = v8::ObjectTemplate::New();
+ obj->SetAccessor(v8_str("p1"), DirectGetterCallback);
+ context->Global()->Set(v8_str("o1"), obj->NewInstance());
+ p_getter_count = 0;
+ CompileRun(
+ "function f() {"
+ " for (var i = 0; i < 30; i++) o1.p1;"
+ "}"
+ "f();");
+ CHECK_EQ(30, p_getter_count);
+}
+
+
+v8::Handle<v8::Value> ThrowingDirectGetterCallback(
+ Local<String> name, const v8::AccessorInfo& info) {
+ return v8::ThrowException(v8_str("g"));
+}
+
+
+THREADED_TEST(LoadICFastApi_DirectCall_Throw) {
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::Handle<v8::ObjectTemplate> obj = v8::ObjectTemplate::New();
+ obj->SetAccessor(v8_str("p1"), ThrowingDirectGetterCallback);
+ context->Global()->Set(v8_str("o1"), obj->NewInstance());
+ v8::Handle<Value> result = CompileRun(
+ "var result = '';"
+ "for (var i = 0; i < 5; i++) {"
+ " try { o1.p1; } catch (e) { result += e; }"
+ "}"
+ "result;");
+ CHECK_EQ(v8_str("ggggg"), result);
+}
+
+
THREADED_TEST(InterceptorCallICFastApi_TrivialSignature) {
int interceptor_call_count = 0;
v8::HandleScope scope;
diff --git a/deps/v8/test/mjsunit/array-concat.js b/deps/v8/test/mjsunit/array-concat.js
index db89f4d0b..97bd85aca 100644
--- a/deps/v8/test/mjsunit/array-concat.js
+++ b/deps/v8/test/mjsunit/array-concat.js
@@ -101,7 +101,6 @@ while (pos = poses.shift()) {
assertEquals("undefined", typeof(c[-1]));
assertEquals("undefined", typeof(c[0xffffffff]));
assertEquals(c.length, a.length + 1);
-
}
poses = [140, 4000000000];
@@ -193,3 +192,46 @@ for (var i = 0; i < holey.length; i++) {
assertTrue(i in holey);
}
}
+
+// Polluted prototype from prior tests.
+delete Array.prototype[123];
+
+// Check that concat reads getters in the correct order.
+var arr1 = [,2];
+var arr2 = [1,3];
+var r1 = [].concat(arr1, arr2); // [,2,1,3]
+assertEquals([,2,1,3], r1);
+
+// Make first array change length of second array.
+Object.defineProperty(arr1, 0, {get: function() {
+ arr2.push("X");
+ return undefined;
+  }, configurable: true});
+var r2 = [].concat(arr1, arr2); // [undefined,2,1,3,"X"]
+assertEquals([undefined,2,1,3,"X"], r2);
+
+// Make first array change length of second array massively.
+arr2.length = 2;
+Object.defineProperty(arr1, 0, {get: function() {
+ arr2[500000] = "X";
+ return undefined;
+  }, configurable: true});
+var r3 = [].concat(arr1, arr2); // [undefined,2,1,3,<holes...>,"X"]
+var expected = [undefined,2,1,3];
+expected[500000 + 2] = "X";
+
+assertEquals(expected, r3);
+
+var arr3 = [];
+var trace = [];
+var expectedTrace = [];
+function mkGetter(i) { return function() { trace.push(i); }; }
+arr3.length = 10000;
+for (var i = 0; i < 100; i++) {
+ Object.defineProperty(arr3, i * i, {get: mkGetter(i)});
+ expectedTrace[i] = i;
+ expectedTrace[100 + i] = i;
+}
+var r4 = [0].concat(arr3, arr3);
+assertEquals(1 + arr3.length * 2, r4.length);
+assertEquals(expectedTrace, trace);
diff --git a/deps/v8/test/mjsunit/regress/regress-1145.js b/deps/v8/test/mjsunit/regress/regress-1145.js
new file mode 100644
index 000000000..16d5527bb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1145.js
@@ -0,0 +1,54 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --opt-eagerly --debug-code --lazy
+
+// See: http://code.google.com/p/v8/issues/detail?id=1145
+// Should not throw a syntax error exception (change this if we make lazily
+// compiled functions with syntax errors into early errors).
+// Should not hit an assertion in debug mode.
+
+// A lazily compiled function with a syntax error that is attempted inlined
+// would set a pending exception that is then ignored (until it triggers
+// an assert).
+// This file must be at least 1024 bytes long to trigger lazy compilation.
+
+function f() { return 1; }
+
+// Must be lazy. Must throw SyntaxError during compilation.
+function fail() { continue; }
+
+function opt_me() {
+ var x = 1;
+ // Do lots of function calls and hope to be optimized.
+ for (var i = 0; i < 1000000; i++) {
+ x = f();
+ }
+ if (x == 0) fail(); // Hope to be inlined during optimization.
+}
+
+opt_me();
diff --git a/deps/v8/test/mjsunit/regress/regress-1172-bis.js b/deps/v8/test/mjsunit/regress/regress-1172-bis.js
new file mode 100644
index 000000000..e8d5c8127
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1172-bis.js
@@ -0,0 +1,37 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Verifies that exception thrown from JS accessors when attempting a call
+// are properly treated.
+
+Object.prototype.__defineGetter__(0, function() { throw 42; });
+try {
+ Object[0]();
+ assertUnreachable();
+} catch(e) {
+ assertEquals(42, e);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-1181.js b/deps/v8/test/mjsunit/regress/regress-1181.js
new file mode 100644
index 000000000..d45a0bee3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1181.js
@@ -0,0 +1,54 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The first count times, test is called with an integer argument and
+// crankshaft produces code for int32 representation. Test that the
+// implementation correctly deoptimizes.
+
+// Flags: --allow-natives-syntax
+
+function test(x) {
+ var xp = x * 1 - 1;
+ return xp;
+}
+
+
+function check(count) {
+ %DeoptimizeFunction(test);
+ var i;
+  for (var x = 0; x < count; x++) {
+    for (var y = 0; y < count; y++) {
+ i = test(x / 100);
+ }
+ }
+ assertEquals((count - 1) / 100, i + 1);
+}
+
+
+check(150);
+check(200);
+check(350); \ No newline at end of file